bitkeeper revision 1.1159.36.1 (41224664I9csn5Rc83-pM_anatKyCg)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 17 Aug 2004 17:54:44 +0000 (17:54 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 17 Aug 2004 17:54:44 +0000 (17:54 +0000)
Fix VESA BIOS mapping issue. Clean up workqueue/taskqueue confusion.

20 files changed:
.rootkeys
linux-2.4.26-xen-sparse/arch/xen/mm/Makefile
linux-2.4.26-xen-sparse/arch/xen/mm/fault.c
linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h
linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
linux-2.4.26-xen-sparse/include/asm-xen/queues.h [new file with mode: 0644]
linux-2.4.26-xen-sparse/mkbuildtree
linux-2.4.26-xen-sparse/mm/mmap.c [deleted file]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c [deleted file]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c
linux-2.6.7-xen-sparse/arch/xen/kernel/fixup.c
linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c
linux-2.6.7-xen-sparse/drivers/xen/console/console.c
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h
linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h
linux-2.6.7-xen-sparse/include/asm-xen/queues.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/mm/mmap.c [deleted file]

diff --git a/.rootkeys b/.rootkeys
index 4bfb84abd1dd4b7fd37dcc5e7323e77d0b8b14a7..ab4ce594acfdc1ff0a5705c5982fb48783b02ae6 100644
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e67X7JyupgdYkgDX19Huj2sAw linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
 3e5a4e67gr4NLGtQ5CvSLimMYZlkOA linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
 3e5a4e676uK4xErTBDH6XJREn9LSyg linux-2.4.26-xen-sparse/include/asm-xen/processor.h
+41224663YBCUMX1kVo_HRUtgaHTi7w linux-2.4.26-xen-sparse/include/asm-xen/queues.h
 3e5a4e68uJz-xI0IBVMD7xRLQKJDFg linux-2.4.26-xen-sparse/include/asm-xen/segment.h
 3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA linux-2.4.26-xen-sparse/include/asm-xen/smp.h
 4062f7e2PzFOUGT0PaE7A0VprTU3JQ linux-2.4.26-xen-sparse/include/asm-xen/synch_bitops.h
 3e6e7c1efbQe93xCvOpOVCnXTMmQ5w linux-2.4.26-xen-sparse/mkbuildtree
 406aeeafkrnCuIVWLFv3kfn4uAD5Eg linux-2.4.26-xen-sparse/mm/highmem.c
 3e5a4e68GxCIaFH4sy01v1wjapetaA linux-2.4.26-xen-sparse/mm/memory.c
-411ce99d_uOUTK61pkqbdIAi1CIaSA linux-2.4.26-xen-sparse/mm/mmap.c
 3f108af5VxPkLv13tXpXgoRKALQtXQ linux-2.4.26-xen-sparse/mm/mprotect.c
 3e5a4e681xMPdF9xCMwpyfuYMySU5g linux-2.4.26-xen-sparse/mm/mremap.c
 409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4.26-xen-sparse/mm/page_alloc.c
 40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
 40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c
 41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c
-411b9db3oFpYQc4C-_mO2lRTcSz8UQ linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c
 40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
 4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.7-xen-sparse/arch/xen/i386/pci/Makefile
 4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.7-xen-sparse/arch/xen/i386/pci/direct.c
 40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.7-xen-sparse/include/asm-xen/hypervisor.h
 40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.7-xen-sparse/include/asm-xen/multicall.h
 3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.7-xen-sparse/include/asm-xen/proc_cmd.h
+4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.7-xen-sparse/include/asm-xen/queues.h
 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.7-xen-sparse/include/asm-xen/suspend.h
 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.7-xen-sparse/include/asm-xen/xen_proc.h
 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.7-xen-sparse/mkbuildtree
-411b9db3dpQAK-pcP8WwcRHZGn2eKg linux-2.6.7-xen-sparse/mm/mmap.c
 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.7-xen-sparse/mm/page_alloc.c
 40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Make.defs
 3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
diff --git a/linux-2.4.26-xen-sparse/arch/xen/mm/Makefile b/linux-2.4.26-xen-sparse/arch/xen/mm/Makefile
index 45e189775d038b37cfd2e695f867d4b3963bf5d1..d0d16114b631d5183ea38d63347509621f418148 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/mm/Makefile
+++ b/linux-2.4.26-xen-sparse/arch/xen/mm/Makefile
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-obj-y   := init.o fault.o extable.o pageattr.o hypervisor.o ioremap.o mmap.o
+obj-y   := init.o fault.o extable.o pageattr.o hypervisor.o ioremap.o
 
 export-objs := pageattr.o
 
diff --git a/linux-2.4.26-xen-sparse/arch/xen/mm/fault.c b/linux-2.4.26-xen-sparse/arch/xen/mm/fault.c
index 94f8cf95a6c0a9a3507004e64735a0cf2013a1b7..8aeb7fa1737ae4c56aa767d59b96bc3f5d9c6a52 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/mm/fault.c
+++ b/linux-2.4.26-xen-sparse/arch/xen/mm/fault.c
@@ -121,10 +121,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs,
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 1) == 0.
         */
-       if (unlikely(address >= TASK_SIZE) ||
-           unlikely(address < (FIRST_USER_PGD_NR<<PGDIR_SHIFT)))
-               if (!(error_code & 5))
-                       goto vmalloc_fault;
+       if (address >= TASK_SIZE && !(error_code & 5))
+               goto vmalloc_fault;
 
        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;
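
The simplified test reads directly off the i386 fault error code: bit 0 (0x1) is set for a protection violation and clear when the page was simply not present; bit 2 (0x4) is set when the fault came from user mode. A kernel-mode access to a not-present page above TASK_SIZE is the signature of a vmalloc-area fault. As an illustrative predicate only (this macro is made up for exposition, not part of the patch):

    /* Hypothetical helper, equivalent to the new test above:
     * kernel-mode fault (bit 2 clear) on a not-present page (bit 0
     * clear) at a kernel virtual address => take the vmalloc path. */
    #define IS_VMALLOC_FAULT(addr, err) \
            ((addr) >= TASK_SIZE && !((err) & 5))

With FIRST_USER_PGD_NR back at zero there is no longer a reserved low region, so the old extra case for addresses below FIRST_USER_PGD_NR<<PGDIR_SHIFT disappears.
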
diff --git a/linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h b/linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h
index 6de5a0c137c71f1c2c9035177ccc4adc1aab9e54..a0d9d506efaf47a3f5d3f916fdf60594dc60e6fa 100644
--- a/linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h
+++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h
@@ -54,15 +54,11 @@ static inline pgd_t *get_pgd_slow(void)
                        if (!pmd)
                                goto out_oom;
                        clear_page(pmd);
-                       set_pgd(pgd + FIRST_USER_PGD_NR, __pgd(1 + __pa(pmd)));
+                       set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
                }
-               memcpy(pgd,
-                       swapper_pg_dir,
-                       FIRST_USER_PGD_NR * sizeof(pgd_t));
-               memcpy(pgd + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       swapper_pg_dir + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD -
-                        FIRST_USER_PGD_NR) * sizeof(pgd_t));
+               memcpy(pgd + USER_PTRS_PER_PGD,
+                       init_mm.pgd + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
 out_oom:
@@ -79,15 +75,10 @@ static inline pgd_t *get_pgd_slow(void)
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
 
        if (pgd) {
-               memset(pgd + FIRST_USER_PGD_NR,
-                       0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-               memcpy(pgd,
-                       init_mm.pgd,
-                       FIRST_USER_PGD_NR * sizeof(pgd_t));
-               memcpy(pgd + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       init_mm.pgd + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD -
-                        FIRST_USER_PGD_NR) * sizeof(pgd_t));
+               memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+               memcpy(pgd + USER_PTRS_PER_PGD,
+                       init_mm.pgd + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
                 __make_page_readonly(pgd);
                queue_pgd_pin(__pa(pgd));
        }
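
The effect of the pgalloc.h change is easier to see as a picture of the page directory that get_pgd_slow() now builds (a sketch assuming the two-level i386 layout, where PTRS_PER_PGD is 1024):

    /*
     * pgd[0 .. USER_PTRS_PER_PGD-1]     user slots: zeroed here (or
     *                                   pointed at fresh pmds in the
     *                                   PAE branch above)
     * pgd[USER_PTRS_PER_PGD .. 1023]    kernel slots: copied verbatim
     *                                   from init_mm.pgd
     *
     * With FIRST_USER_PGD_NR == 0 there is no reserved slot sitting in
     * front of the user range, so the two memcpy()s collapse into one.
     */
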
diff --git a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
index 5faf5350fa74210356c2534762bd788cd273cfdf..dc25864d2cda858f0c2eaaefde5e7a325f1496fa 100644
--- a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
+++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
@@ -83,16 +83,16 @@ extern void pgtable_cache_init(void);
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
-#define FIRST_USER_PGD_NR      (1)
-#define USER_PTRS_PER_PGD      ((TASK_SIZE/PGDIR_SIZE)-FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR      0
 
-#if 0 /* XEN */
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
 #define TWOLEVEL_PGDIR_SHIFT   22
 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
 #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-#endif
+
 
 #ifndef __ASSEMBLY__
 /* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
@@ -367,7 +367,4 @@ static inline unsigned long arbitrary_virt_to_phys(void *va)
 
 #define io_remap_page_range remap_page_range
 
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_CHECK_FIXED_MAPPING
-
 #endif /* _I386_PGTABLE_H */
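
Plugging in the usual i386 numbers shows what reverting FIRST_USER_PGD_NR to zero buys back (illustrative values, assuming the standard 3GB/1GB split and two-level paging):

    TASK_SIZE   = 0xC0000000    /* 3GB of user virtual address space */
    PGDIR_SIZE  = 4MB           /* one pgd entry per 4MB (shift 22)  */

    /* Before: FIRST_USER_PGD_NR = 1,
     *         USER_PTRS_PER_PGD = 0xC0000000/4MB - 1 = 767
     *   => pgd[0], covering virtual addresses 0-4MB, was off-limits to
     *      user mappings -- exactly where the legacy VGA/VESA BIOS
     *      region (0xA0000-0xFFFFF) needs to be mapped.
     * After:  FIRST_USER_PGD_NR = 0,
     *         USER_PTRS_PER_PGD = 0xC0000000/4MB = 768
     *   => the low 4MB is ordinary user address space again. */
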
diff --git a/linux-2.4.26-xen-sparse/include/asm-xen/queues.h b/linux-2.4.26-xen-sparse/include/asm-xen/queues.h
new file mode 100644
index 0000000..64a4cbe
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/include/asm-xen/queues.h
@@ -0,0 +1,28 @@
+
+/*
+ * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
+ * queues. Unfortunately the semantics is not the same. With task queues we 
+ * can defer work until a particular event occurs -- this is not
+ * straightforwardly done with work queues (queued work is performed asap, or
+ * after some fixed timeout). Conversely, work queues are a (slightly) neater
+ * way of deferring work to a process context than using task queues in 2.4.
+ * 
+ * So, what we do here is a bit weird:
+ *  1. On 2.4, we emulate work queues over task queues.
+ *  2. On 2.6, we emulate task queues over work queues.
+ * 
+ * Note how much harder the latter is. :-)
+ */
+
+#ifndef __QUEUES_H__
+#define __QUEUES_H__
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/tqueue.h>
+
+#define DECLARE_WORK(_name, _fn, _arg) \
+    struct tq_struct _name = { .routine = _fn, .data = _arg }
+#define schedule_work(_w) schedule_task(_w)
+
+#endif /* __QUEUES_H__ */
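
Its 2.6 sibling (the new linux-2.6.7-xen-sparse/include/asm-xen/queues.h in the file list; its diff is not shown here) has to do the harder emulation in the other direction: task queues on top of work queues. A minimal sketch of the idea -- an assumption about its shape, not a quotation -- is to park pending tq_structs on a private list and only hand them to schedule_work() once the awaited event fires:

    #include <linux/list.h>
    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    /* A 2.4-style deferred task, built on a 2.6 work_struct. */
    struct tq_struct {
        struct work_struct work;    /* what eventually runs        */
        struct list_head   list;    /* links us on a task queue    */
        unsigned long      pending; /* bit 0 set while on a queue  */
    };

    typedef struct list_head task_queue;
    #define DECLARE_TASK_QUEUE(_name) LIST_HEAD(_name)

    /* Park the task until run_task_queue(); nothing is scheduled yet. */
    static inline int queue_task(struct tq_struct *tq, task_queue *q)
    {
        if (test_and_set_bit(0, &tq->pending))
            return 0;                        /* already queued */
        list_add_tail(&tq->list, q);
        return 1;
    }

    /* The awaited event happened: push everything at the work queue.
     * (Locking against a concurrent queue_task() is elided in this
     * sketch; the real header must serialise the two somehow.) */
    static inline void run_task_queue(task_queue *q)
    {
        struct list_head *ent, *tmp;
        list_for_each_safe(ent, tmp, q) {
            struct tq_struct *tq =
                list_entry(ent, struct tq_struct, list);
            list_del_init(&tq->list);
            clear_bit(0, &tq->pending);
            schedule_work(&tq->work);
        }
    }

This is why the comment above calls the 2.6 direction harder: the 2.4 emulation is two one-line macros, while this one needs its own queueing state.
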
diff --git a/linux-2.4.26-xen-sparse/mkbuildtree b/linux-2.4.26-xen-sparse/mkbuildtree
index 209f92f7327a2a34e538ed505af7563e4f779cff..ca407be27d8db3abfb270683d56e5ac631d584ff 100755
--- a/linux-2.4.26-xen-sparse/mkbuildtree
+++ b/linux-2.4.26-xen-sparse/mkbuildtree
@@ -243,7 +243,6 @@ cd ${AD}/arch/xen/mm
 ln -sf ../../i386/mm/extable.c 
 ln -sf ../../i386/mm/pageattr.c 
 ln -sf ../../../${LINUX_26}/arch/xen/i386/mm/hypervisor.c
-ln -sf ../../../${LINUX_26}/arch/xen/i386/mm/mmap.c
 
 cd ${AD}/arch/xen/drivers/console
 ln -sf ../../../../${LINUX_26}/drivers/xen/console/console.c 
diff --git a/linux-2.4.26-xen-sparse/mm/mmap.c b/linux-2.4.26-xen-sparse/mm/mmap.c
deleted file mode 100644
index ed7b11c..0000000
--- a/linux-2.4.26-xen-sparse/mm/mmap.c
+++ /dev/null
@@ -1,1219 +0,0 @@
-/*
- *     linux/mm/mmap.c
- *
- * Written by obz.
- */
-#include <linux/slab.h>
-#include <linux/shm.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <linux/swapctl.h>
-#include <linux/smp_lock.h>
-#include <linux/init.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/personality.h>
-#include <linux/mount.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware.  The expected
- * behavior is in parens:
- *
- * map_type    prot
- *             PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
- * MAP_SHARED  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
- *             w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
- *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *             
- * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
- *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
- *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *
- */
-pgprot_t protection_map[16] = {
-       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
-int sysctl_overcommit_memory;
-int max_map_count = DEFAULT_MAX_MAP_COUNT;
-
-/* Check that a process has enough memory to allocate a
- * new virtual mapping.
- */
-int vm_enough_memory(long pages)
-{
-       /* Stupid algorithm to decide if we have enough memory: while
-        * simple, it hopefully works in most obvious cases.. Easy to
-        * fool it, but this should catch most mistakes.
-        */
-       /* 23/11/98 NJC: Somewhat less stupid version of algorithm,
-        * which tries to do "TheRightThing".  Instead of using half of
-        * (buffers+cache), use the minimum values.  Allow an extra 2%
-        * of num_physpages for safety margin.
-        */
-
-       unsigned long free;
-       
-        /* Sometimes we want to use more memory than we have. */
-       if (sysctl_overcommit_memory)
-           return 1;
-
-       /* The page cache contains buffer pages these days.. */
-       free = page_cache_size;
-       free += nr_free_pages();
-       free += nr_swap_pages;
-
-       /*
-        * This double-counts: the nrpages are both in the page-cache
-        * and in the swapper space. At the same time, this compensates
-        * for the swap-space over-allocation (ie "nr_swap_pages" being
-        * too small.
-        */
-       free += swapper_space.nrpages;
-
-       /*
-        * The code below doesn't account for free space in the inode
-        * and dentry slab cache, slab cache fragmentation, inodes and
-        * dentries which will become freeable under VM load, etc.
-        * Lets just hope all these (complex) factors balance out...
-        */
-       free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
-       free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
-
-       return free > pages;
-}
-
-/* Remove one vm structure from the inode's i_mapping address space. */
-static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
-{
-       struct file * file = vma->vm_file;
-
-       if (file) {
-               struct inode *inode = file->f_dentry->d_inode;
-               if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_inc(&inode->i_writecount);
-               if(vma->vm_next_share)
-                       vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
-               *vma->vm_pprev_share = vma->vm_next_share;
-       }
-}
-
-static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
-{
-       lock_vma_mappings(vma);
-       __remove_shared_vm_struct(vma);
-       unlock_vma_mappings(vma);
-}
-
-void lock_vma_mappings(struct vm_area_struct *vma)
-{
-       struct address_space *mapping;
-
-       mapping = NULL;
-       if (vma->vm_file)
-               mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
-       if (mapping)
-               spin_lock(&mapping->i_shared_lock);
-}
-
-void unlock_vma_mappings(struct vm_area_struct *vma)
-{
-       struct address_space *mapping;
-
-       mapping = NULL;
-       if (vma->vm_file)
-               mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
-       if (mapping)
-               spin_unlock(&mapping->i_shared_lock);
-}
-
-/*
- *  sys_brk() for the most part doesn't need the global kernel
- *  lock, except when an application is doing something nasty
- *  like trying to un-brk an area that has already been mapped
- *  to a regular file.  in this case, the unmapping will need
- *  to invoke file system routines that need the global lock.
- */
-asmlinkage unsigned long sys_brk(unsigned long brk)
-{
-       unsigned long rlim, retval;
-       unsigned long newbrk, oldbrk;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-
-       if (brk < mm->end_code)
-               goto out;
-       newbrk = PAGE_ALIGN(brk);
-       oldbrk = PAGE_ALIGN(mm->brk);
-       if (oldbrk == newbrk)
-               goto set_brk;
-
-       /* Always allow shrinking brk. */
-       if (brk <= mm->brk) {
-               if (!do_munmap(mm, newbrk, oldbrk-newbrk))
-                       goto set_brk;
-               goto out;
-       }
-
-       /* Check against rlimit.. */
-       rlim = current->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
-       /* Check against existing mmap mappings. */
-       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
-               goto out;
-
-       /* Check if we have enough memory.. */
-       if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
-               goto out;
-
-       /* Ok, looks good - let it rip. */
-       if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
-               goto out;
-set_brk:
-       mm->brk = brk;
-out:
-       retval = mm->brk;
-       up_write(&mm->mmap_sem);
-       return retval;
-}
-
-/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
- * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
- * into "VM_xxx".
- */
-static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
-{
-#define _trans(x,bit1,bit2) \
-((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
-
-       unsigned long prot_bits, flag_bits;
-       prot_bits =
-               _trans(prot, PROT_READ, VM_READ) |
-               _trans(prot, PROT_WRITE, VM_WRITE) |
-               _trans(prot, PROT_EXEC, VM_EXEC);
-       flag_bits =
-               _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
-               _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
-               _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
-       return prot_bits | flag_bits;
-#undef _trans
-}
-
-#ifdef DEBUG_MM_RB
-static int browse_rb(rb_node_t * rb_node) {
-       int i = 0;
-       if (rb_node) {
-               i++;
-               i += browse_rb(rb_node->rb_left);
-               i += browse_rb(rb_node->rb_right);
-       }
-       return i;
-}
-
-static void validate_mm(struct mm_struct * mm) {
-       int bug = 0;
-       int i = 0;
-       struct vm_area_struct * tmp = mm->mmap;
-       while (tmp) {
-               tmp = tmp->vm_next;
-               i++;
-       }
-       if (i != mm->map_count)
-               printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
-       i = browse_rb(mm->mm_rb.rb_node);
-       if (i != mm->map_count)
-               printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-       if (bug)
-               BUG();
-}
-#else
-#define validate_mm(mm) do { } while (0)
-#endif
-
-static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
-                                               struct vm_area_struct ** pprev,
-                                               rb_node_t *** rb_link, rb_node_t ** rb_parent)
-{
-       struct vm_area_struct * vma;
-       rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
-
-       __rb_link = &mm->mm_rb.rb_node;
-       rb_prev = __rb_parent = NULL;
-       vma = NULL;
-
-       while (*__rb_link) {
-               struct vm_area_struct *vma_tmp;
-
-               __rb_parent = *__rb_link;
-               vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
-
-               if (vma_tmp->vm_end > addr) {
-                       vma = vma_tmp;
-                       if (vma_tmp->vm_start <= addr)
-                               return vma;
-                       __rb_link = &__rb_parent->rb_left;
-               } else {
-                       rb_prev = __rb_parent;
-                       __rb_link = &__rb_parent->rb_right;
-               }
-       }
-
-       *pprev = NULL;
-       if (rb_prev)
-               *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
-       *rb_link = __rb_link;
-       *rb_parent = __rb_parent;
-       return vma;
-}
-
-static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-                                  rb_node_t * rb_parent)
-{
-       if (prev) {
-               vma->vm_next = prev->vm_next;
-               prev->vm_next = vma;
-       } else {
-               mm->mmap = vma;
-               if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
-               else
-                       vma->vm_next = NULL;
-       }
-}
-
-static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
-                                rb_node_t ** rb_link, rb_node_t * rb_parent)
-{
-       rb_link_node(&vma->vm_rb, rb_parent, rb_link);
-       rb_insert_color(&vma->vm_rb, &mm->mm_rb);
-}
-
-static inline void __vma_link_file(struct vm_area_struct * vma)
-{
-       struct file * file;
-
-       file = vma->vm_file;
-       if (file) {
-               struct inode * inode = file->f_dentry->d_inode;
-               struct address_space *mapping = inode->i_mapping;
-               struct vm_area_struct **head;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&inode->i_writecount);
-
-               head = &mapping->i_mmap;
-               if (vma->vm_flags & VM_SHARED)
-                       head = &mapping->i_mmap_shared;
-      
-               /* insert vma into inode's share list */
-               if((vma->vm_next_share = *head) != NULL)
-                       (*head)->vm_pprev_share = &vma->vm_next_share;
-               *head = vma;
-               vma->vm_pprev_share = head;
-       }
-}
-
-static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma,  struct vm_area_struct * prev,
-                      rb_node_t ** rb_link, rb_node_t * rb_parent)
-{
-       __vma_link_list(mm, vma, prev, rb_parent);
-       __vma_link_rb(mm, vma, rb_link, rb_parent);
-       __vma_link_file(vma);
-}
-
-static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-                           rb_node_t ** rb_link, rb_node_t * rb_parent)
-{
-       lock_vma_mappings(vma);
-       spin_lock(&mm->page_table_lock);
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
-       spin_unlock(&mm->page_table_lock);
-       unlock_vma_mappings(vma);
-
-       mm->map_count++;
-       validate_mm(mm);
-}
-
-static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
-                    rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
-{
-       spinlock_t * lock = &mm->page_table_lock;
-       if (!prev) {
-               prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
-               goto merge_next;
-       }
-       if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
-               struct vm_area_struct * next;
-
-               spin_lock(lock);
-               prev->vm_end = end;
-               next = prev->vm_next;
-               if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
-                       prev->vm_end = next->vm_end;
-                       __vma_unlink(mm, next, prev);
-                       spin_unlock(lock);
-
-                       mm->map_count--;
-                       kmem_cache_free(vm_area_cachep, next);
-                       return 1;
-               }
-               spin_unlock(lock);
-               return 1;
-       }
-
-       prev = prev->vm_next;
-       if (prev) {
- merge_next:
-               if (!can_vma_merge(prev, vm_flags))
-                       return 0;
-               if (end == prev->vm_start) {
-                       spin_lock(lock);
-                       prev->vm_start = addr;
-                       spin_unlock(lock);
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
-       unsigned long prot, unsigned long flags, unsigned long pgoff)
-{
-       struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
-       unsigned int vm_flags;
-       int correct_wcount = 0;
-       int error;
-       rb_node_t ** rb_link, * rb_parent;
-
-       if (file) {
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
-
-       if (!len)
-               return addr;
-
-       len = PAGE_ALIGN(len);
-
-       if (len > TASK_SIZE || len == 0)
-               return -EINVAL;
-
-       /* offset overflow? */
-       if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-               return -EINVAL;
-
-       /* Too many mappings? */
-       if (mm->map_count > max_map_count)
-               return -ENOMEM;
-
-       /* Obtain the address to map to. we verify (or select) it and ensure
-        * that it represents a valid section of the address space.
-        */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
-       if (addr & ~PAGE_MASK)
-               return addr;
-
-       /* Do simple checking here so the lower-level routines won't have
-        * to. we assume access permissions have been handled by the open
-        * of the memory object, so we don't do any here.
-        */
-       vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
-       /* mlock MCL_FUTURE? */
-       if (vm_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
-               locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
-                       return -EAGAIN;
-       }
-
-       if (file) {
-               switch (flags & MAP_TYPE) {
-               case MAP_SHARED:
-                       if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
-                               return -EACCES;
-
-                       /* Make sure we don't allow writing to an append-only file.. */
-                       if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
-                               return -EACCES;
-
-                       /* make sure there are no mandatory locks on the file. */
-                       if (locks_verify_locked(file->f_dentry->d_inode))
-                               return -EAGAIN;
-
-                       vm_flags |= VM_SHARED | VM_MAYSHARE;
-                       if (!(file->f_mode & FMODE_WRITE))
-                               vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
-
-                       /* fall through */
-               case MAP_PRIVATE:
-                       if (!(file->f_mode & FMODE_READ))
-                               return -EACCES;
-                       break;
-
-               default:
-                       return -EINVAL;
-               }
-       } else {
-               vm_flags |= VM_SHARED | VM_MAYSHARE;
-               switch (flags & MAP_TYPE) {
-               default:
-                       return -EINVAL;
-               case MAP_PRIVATE:
-                       vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
-                       /* fall through */
-               case MAP_SHARED:
-                       break;
-               }
-       }
-
-       /* Clear old maps */
-munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
-               if (do_munmap(mm, addr, len))
-                       return -ENOMEM;
-               goto munmap_back;
-       }
-
-       /* Check against address space limit. */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
-               return -ENOMEM;
-
-       /* Private writable mapping? Check memory availability.. */
-       if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
-           !(flags & MAP_NORESERVE)                             &&
-           !vm_enough_memory(len >> PAGE_SHIFT))
-               return -ENOMEM;
-
-       /* Can we just expand an old anonymous mapping? */
-       if (!file && !(vm_flags & VM_SHARED) && rb_parent)
-               if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
-                       goto out;
-
-       /* Determine the object being mapped and call the appropriate
-        * specific mapper. the address has already been validated, but
-        * not unmapped, but the maps are removed from the list.
-        */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma)
-               return -ENOMEM;
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-       vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags & 0x0f];
-       vma->vm_ops = NULL;
-       vma->vm_pgoff = pgoff;
-       vma->vm_file = NULL;
-       vma->vm_private_data = NULL;
-       vma->vm_raend = 0;
-
-       if (file) {
-               error = -EINVAL;
-               if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
-                       goto free_vma;
-               if (vm_flags & VM_DENYWRITE) {
-                       error = deny_write_access(file);
-                       if (error)
-                               goto free_vma;
-                       correct_wcount = 1;
-               }
-               vma->vm_file = file;
-               get_file(file);
-               error = file->f_op->mmap(file, vma);
-               if (error)
-                       goto unmap_and_free_vma;
-       } else if (flags & MAP_SHARED) {
-               error = shmem_zero_setup(vma);
-               if (error)
-                       goto free_vma;
-       }
-
-       /* Can addr have changed??
-        *
-        * Answer: Yes, several device drivers can do it in their
-        *         f_op->mmap method. -DaveM
-        */
-       if (addr != vma->vm_start) {
-               /*
-                * It is a bit too late to pretend changing the virtual
-                * area of the mapping, we just corrupted userspace
-                * in the do_munmap, so FIXME (not in 2.4 to avoid breaking
-                * the driver API).
-                */
-               struct vm_area_struct * stale_vma;
-               /* Since addr changed, we rely on the mmap op to prevent 
-                * collisions with existing vmas and just use find_vma_prepare 
-                * to update the tree pointers.
-                */
-               addr = vma->vm_start;
-               stale_vma = find_vma_prepare(mm, addr, &prev,
-                                               &rb_link, &rb_parent);
-               /*
-                * Make sure the lowlevel driver did its job right.
-                */
-               if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end)) {
-                       printk(KERN_ERR "buggy mmap operation: [<%p>]\n",
-                               file ? file->f_op->mmap : NULL);
-                       BUG();
-               }
-       }
-
-       vma_link(mm, vma, prev, rb_link, rb_parent);
-       if (correct_wcount)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
-
-out:   
-       mm->total_vm += len >> PAGE_SHIFT;
-       if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       return addr;
-
-unmap_and_free_vma:
-       if (correct_wcount)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
-       vma->vm_file = NULL;
-       fput(file);
-
-       /* Undo any partial mapping done by a device driver. */
-       zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
-free_vma:
-       kmem_cache_free(vm_area_cachep, vma);
-       return error;
-}
-
-/* Get an address range which is currently unmapped.
- * For shmat() with addr=0.
- *
- * Ugly calling convention alert:
- * Return value with the low bits set means error value,
- * ie
- *     if (ret & ~PAGE_MASK)
- *             error = ret;
- *
- * This function "knows" that -ENOMEM has the bits set.
- */
-#ifndef HAVE_ARCH_UNMAPPED_AREA
-static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct vm_area_struct *vma;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(current->mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
-                       return addr;
-       }
-       addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
-
-       for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr)
-                       return -ENOMEM;
-               if (!vma || addr + len <= vma->vm_start)
-                       return addr;
-               addr = vma->vm_end;
-       }
-}
-#else
-extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-#endif 
-
-#ifndef HAVE_ARCH_CHECK_FIXED_MAPPING
-#define arch_check_fixed_mapping(_file,_addr,_len,_pgoff,_flags) 0
-#else
-extern unsigned long
-arch_check_fixed_mapping(struct file *, unsigned long, unsigned long,
-                       unsigned long, unsigned long);
-#endif
-
-unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       unsigned long ret;
-
-       if (flags & MAP_FIXED) {
-               if (addr > TASK_SIZE - len)
-                       return -ENOMEM;
-               if (addr & ~PAGE_MASK)
-                       return -EINVAL;
-               ret = arch_check_fixed_mapping(file, addr, len, pgoff, flags);
-               if (ret != 0)
-                       return ret;
-               return addr;
-       }
-
-       if (file && file->f_op && file->f_op->get_unmapped_area)
-               return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
-
-       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
-/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
-{
-       struct vm_area_struct *vma = NULL;
-
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       rb_node_t * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
-               }
-       }
-       return vma;
-}
-
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
-struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
-                                     struct vm_area_struct **pprev)
-{
-       if (mm) {
-               /* Go through the RB tree quickly. */
-               struct vm_area_struct * vma;
-               rb_node_t * rb_node, * rb_last_right, * rb_prev;
-               
-               rb_node = mm->mm_rb.rb_node;
-               rb_last_right = rb_prev = NULL;
-               vma = NULL;
-
-               while (rb_node) {
-                       struct vm_area_struct * vma_tmp;
-
-                       vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-                       if (vma_tmp->vm_end > addr) {
-                               vma = vma_tmp;
-                               rb_prev = rb_last_right;
-                               if (vma_tmp->vm_start <= addr)
-                                       break;
-                               rb_node = rb_node->rb_left;
-                       } else {
-                               rb_last_right = rb_node;
-                               rb_node = rb_node->rb_right;
-                       }
-               }
-               if (vma) {
-                       if (vma->vm_rb.rb_left) {
-                               rb_prev = vma->vm_rb.rb_left;
-                               while (rb_prev->rb_right)
-                                       rb_prev = rb_prev->rb_right;
-                       }
-                       *pprev = NULL;
-                       if (rb_prev)
-                               *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
-                       if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
-                               BUG();
-                       return vma;
-               }
-       }
-       *pprev = NULL;
-       return NULL;
-}
-
-struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
-{
-       struct vm_area_struct * vma;
-       unsigned long start;
-
-       addr &= PAGE_MASK;
-       vma = find_vma(mm,addr);
-       if (!vma)
-               return NULL;
-       if (vma->vm_start <= addr)
-               return vma;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               return NULL;
-       start = vma->vm_start;
-       if (expand_stack(vma, addr))
-               return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
-               make_pages_present(addr, start);
-       }
-       return vma;
-}
-
-/* Normal function to fix up a mapping
- * This function is the default for when an area has no specific
- * function.  This may be used as part of a more specific routine.
- * This function works out what part of an area is affected and
- * adjusts the mapping information.  Since the actual page
- * manipulation is done in do_mmap(), none need be done here,
- * though it would probably be more appropriate.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list, so it needs to be
- * reinserted if necessary.
- *
- * The 4 main cases are:
- *    Unmapping the whole area
- *    Unmapping from the start of the segment to a point in it
- *    Unmapping from an intermediate point to the end
- *    Unmapping between to intermediate points, making a hole.
- *
- * Case 4 involves the creation of 2 new areas, for each side of
- * the hole.  If possible, we reuse the existing area rather than
- * allocate a new one, and the return indicates whether the old
- * area was reused.
- */
-static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, 
-       struct vm_area_struct *area, unsigned long addr, size_t len, 
-       struct vm_area_struct *extra)
-{
-       struct vm_area_struct *mpnt;
-       unsigned long end = addr + len;
-
-       area->vm_mm->total_vm -= len >> PAGE_SHIFT;
-       if (area->vm_flags & VM_LOCKED)
-               area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
-
-       /* Unmapping the whole area. */
-       if (addr == area->vm_start && end == area->vm_end) {
-               if (area->vm_ops && area->vm_ops->close)
-                       area->vm_ops->close(area);
-               if (area->vm_file)
-                       fput(area->vm_file);
-               kmem_cache_free(vm_area_cachep, area);
-               return extra;
-       }
-
-       /* Work out to one of the ends. */
-       if (end == area->vm_end) {
-               /*
-                * here area isn't visible to the semaphore-less readers
-                * so we don't need to update it under the spinlock.
-                */
-               area->vm_end = addr;
-               lock_vma_mappings(area);
-               spin_lock(&mm->page_table_lock);
-       } else if (addr == area->vm_start) {
-               area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
-               /* same locking considerations of the above case */
-               area->vm_start = end;
-               lock_vma_mappings(area);
-               spin_lock(&mm->page_table_lock);
-       } else {
-       /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
-               /* Add end mapping -- leave beginning for below */
-               mpnt = extra;
-               extra = NULL;
-
-               mpnt->vm_mm = area->vm_mm;
-               mpnt->vm_start = end;
-               mpnt->vm_end = area->vm_end;
-               mpnt->vm_page_prot = area->vm_page_prot;
-               mpnt->vm_flags = area->vm_flags;
-               mpnt->vm_raend = 0;
-               mpnt->vm_ops = area->vm_ops;
-               mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
-               mpnt->vm_file = area->vm_file;
-               mpnt->vm_private_data = area->vm_private_data;
-               if (mpnt->vm_file)
-                       get_file(mpnt->vm_file);
-               if (mpnt->vm_ops && mpnt->vm_ops->open)
-                       mpnt->vm_ops->open(mpnt);
-               area->vm_end = addr;    /* Truncate area */
-
-               /* Because mpnt->vm_file == area->vm_file this locks
-                * things correctly.
-                */
-               lock_vma_mappings(area);
-               spin_lock(&mm->page_table_lock);
-               __insert_vm_struct(mm, mpnt);
-       }
-
-       __insert_vm_struct(mm, area);
-       spin_unlock(&mm->page_table_lock);
-       unlock_vma_mappings(area);
-       return extra;
-}
-
-/*
- * Try to free as many page directory entries as we can,
- * without having to work very hard at actually scanning
- * the page tables themselves.
- *
- * Right now we try to free page tables if we have a nice
- * PGDIR-aligned area that got free'd up. We could be more
- * granular if we want to, but this is fast and simple,
- * and covers the bad cases.
- *
- * "prev", if it exists, points to a vma before the one
- * we just free'd - but there's no telling how much before.
- */
-static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
-       unsigned long start, unsigned long end)
-{
-       unsigned long first = start & PGDIR_MASK;
-       unsigned long last = end + PGDIR_SIZE - 1;
-       unsigned long start_index, end_index;
-
-       if (!prev) {
-               prev = mm->mmap;
-               if (!prev)
-                       goto no_mmaps;
-               if (prev->vm_end > start) {
-                       if (last > prev->vm_start)
-                               last = prev->vm_start;
-                       goto no_mmaps;
-               }
-       }
-       for (;;) {
-               struct vm_area_struct *next = prev->vm_next;
-
-               if (next) {
-                       if (next->vm_start < start) {
-                               prev = next;
-                               continue;
-                       }
-                       if (last > next->vm_start)
-                               last = next->vm_start;
-               }
-               if (prev->vm_end > first)
-                       first = prev->vm_end + PGDIR_SIZE - 1;
-               break;
-       }
-no_mmaps:
-       if (last < first)
-               return;
-       /*
-        * If the PGD bits are not consecutive in the virtual address, the
-        * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-        */
-       start_index = pgd_index(first);
-       end_index = pgd_index(last);
-       if (end_index > start_index) {
-               clear_page_tables(mm, start_index, end_index - start_index);
-               flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
-       }
-}
-
-/* Munmap is split into 2 main parts -- this part which finds
- * what needs doing, and the areas themselves, which do the
- * work.  This now handles partial unmappings.
- * Jeremy Fitzhardine <jeremy@sw.oz.au>
- */
-int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
-{
-       struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
-
-       if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
-               return -EINVAL;
-
-       if ((len = PAGE_ALIGN(len)) == 0)
-               return -EINVAL;
-
-       /* Check if this memory area is ok - put it on the temporary
-        * list if so..  The checks here are pretty simple --
-        * every area affected in some way (by any overlap) is put
-        * on the list.  If nothing is put on, nothing is affected.
-        */
-       mpnt = find_vma_prev(mm, addr, &prev);
-       if (!mpnt)
-               return 0;
-       /* we have  addr < mpnt->vm_end  */
-
-       if (mpnt->vm_start >= addr+len)
-               return 0;
-
-       /* If we'll make "hole", check the vm areas limit */
-       if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
-           && mm->map_count >= max_map_count)
-               return -ENOMEM;
-
-       /*
-        * We may need one additional vma to fix up the mappings ... 
-        * and this is the last chance for an easy error exit.
-        */
-       extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!extra)
-               return -ENOMEM;
-
-       npp = (prev ? &prev->vm_next : &mm->mmap);
-       free = NULL;
-       spin_lock(&mm->page_table_lock);
-       for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
-               *npp = mpnt->vm_next;
-               mpnt->vm_next = free;
-               free = mpnt;
-               rb_erase(&mpnt->vm_rb, &mm->mm_rb);
-       }
-       mm->mmap_cache = NULL;  /* Kill the cache. */
-       spin_unlock(&mm->page_table_lock);
-
-       /* Ok - we have the memory areas we should free on the 'free' list,
-        * so release them, and unmap the page range..
-        * If the one of the segments is only being partially unmapped,
-        * it will put new vm_area_struct(s) into the address space.
-        * In that case we have to be careful with VM_DENYWRITE.
-        */
-       while ((mpnt = free) != NULL) {
-               unsigned long st, end, size;
-               struct file *file = NULL;
-
-               free = free->vm_next;
-
-               st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
-               end = addr+len;
-               end = end > mpnt->vm_end ? mpnt->vm_end : end;
-               size = end - st;
-
-               if (mpnt->vm_flags & VM_DENYWRITE &&
-                   (st != mpnt->vm_start || end != mpnt->vm_end) &&
-                   (file = mpnt->vm_file) != NULL) {
-                       atomic_dec(&file->f_dentry->d_inode->i_writecount);
-               }
-               remove_shared_vm_struct(mpnt);
-               mm->map_count--;
-
-               zap_page_range(mm, st, size);
-
-               /*
-                * Fix the mapping, and free the old area if it wasn't reused.
-                */
-               extra = unmap_fixup(mm, mpnt, st, size, extra);
-               if (file)
-                       atomic_inc(&file->f_dentry->d_inode->i_writecount);
-       }
-       validate_mm(mm);
-
-       /* Release the extra vma struct if it wasn't used */
-       if (extra)
-               kmem_cache_free(vm_area_cachep, extra);
-
-       free_pgtables(mm, prev, addr, addr+len);
-
-       return 0;
-}
-
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
-{
-       int ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_munmap(mm, addr, len);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-
-/*
- *  this is really a simplified "do_mmap".  it only handles
- *  anonymous maps.  eventually we may be able to do some
- *  brk-specific accounting here.
- */
-unsigned long do_brk(unsigned long addr, unsigned long len)
-{
-       struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
-       unsigned long flags;
-       rb_node_t ** rb_link, * rb_parent;
-
-       len = PAGE_ALIGN(len);
-       if (!len)
-               return addr;
-
-       if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-               return -EINVAL;
-
-       /*
-        * mlock MCL_FUTURE?
-        */
-       if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
-               locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
-                       return -EAGAIN;
-       }
-
-       /*
-        * Clear old maps.  this also does some error checking for us
-        */
- munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
-               if (do_munmap(mm, addr, len))
-                       return -ENOMEM;
-               goto munmap_back;
-       }
-
-       /* Check against address space limits *after* clearing old maps... */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
-               return -ENOMEM;
-
-       if (mm->map_count > max_map_count)
-               return -ENOMEM;
-
-       if (!vm_enough_memory(len >> PAGE_SHIFT))
-               return -ENOMEM;
-
-       flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
-
-       /* Can we just expand an old anonymous mapping? */
-       if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
-               goto out;
-
-       /*
-        * create a vma struct for an anonymous mapping
-        */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma)
-               return -ENOMEM;
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-       vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags & 0x0f];
-       vma->vm_ops = NULL;
-       vma->vm_pgoff = 0;
-       vma->vm_file = NULL;
-       vma->vm_private_data = NULL;
-
-       vma_link(mm, vma, prev, rb_link, rb_parent);
-
-out:
-       mm->total_vm += len >> PAGE_SHIFT;
-       if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       return addr;
-}
-
-/* Build the RB tree corresponding to the VMA list. */
-void build_mmap_rb(struct mm_struct * mm)
-{
-       struct vm_area_struct * vma;
-       rb_node_t ** rb_link, * rb_parent;
-
-       mm->mm_rb = RB_ROOT;
-       rb_link = &mm->mm_rb.rb_node;
-       rb_parent = NULL;
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               __vma_link_rb(mm, vma, rb_link, rb_parent);
-               rb_parent = &vma->vm_rb;
-               rb_link = &rb_parent->rb_right;
-       }
-}
-
-/* Release all mmaps. */
-void exit_mmap(struct mm_struct * mm)
-{
-       struct vm_area_struct * mpnt;
-
-       release_segments(mm);
-       spin_lock(&mm->page_table_lock);
-       mpnt = mm->mmap;
-       mm->mmap = mm->mmap_cache = NULL;
-       mm->mm_rb = RB_ROOT;
-       mm->rss = 0;
-       spin_unlock(&mm->page_table_lock);
-       mm->total_vm = 0;
-       mm->locked_vm = 0;
-
-       flush_cache_mm(mm);
-       while (mpnt) {
-               struct vm_area_struct * next = mpnt->vm_next;
-               unsigned long start = mpnt->vm_start;
-               unsigned long end = mpnt->vm_end;
-               unsigned long size = end - start;
-
-               if (mpnt->vm_ops) {
-                       if (mpnt->vm_ops->close)
-                               mpnt->vm_ops->close(mpnt);
-               }
-               mm->map_count--;
-               remove_shared_vm_struct(mpnt);
-               zap_page_range(mm, start, size);
-               if (mpnt->vm_file)
-                       fput(mpnt->vm_file);
-               kmem_cache_free(vm_area_cachep, mpnt);
-               mpnt = next;
-       }
-
-       /* This is just debugging */
-       if (mm->map_count)
-               BUG();
-
-       clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
-
-       flush_tlb_mm(mm);
-}
-
-/* Insert vm structure into process list sorted by address
- * and into the inode's i_mmap ring.  If vm_file is non-NULL
- * then the i_shared_lock must be held here.
- */
-void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
-{
-       struct vm_area_struct * __vma, * prev;
-       rb_node_t ** rb_link, * rb_parent;
-
-       __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
-       mm->map_count++;
-       validate_mm(mm);
-}
-
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
-{
-       struct vm_area_struct * __vma, * prev;
-       rb_node_t ** rb_link, * rb_parent;
-
-       __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
-       vma_link(mm, vma, prev, rb_link, rb_parent);
-       validate_mm(mm);
-}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
index 2bd609f830d9037e2fdb24e409a7909881fec213..55063591f02bbd7203a61c7bf27280eede0d55c1 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
@@ -6,7 +6,7 @@ XENARCH := $(subst ",,$(CONFIG_XENARCH))
 
 CFLAGS += -Iarch/$(XENARCH)/mm
 
-obj-y  := init.o fault.o ioremap.o pgtable.o hypervisor.o mmap.o
+obj-y  := init.o fault.o ioremap.o pgtable.o hypervisor.o
 c-obj-y        := extable.o pageattr.o 
 
 c-obj-$(CONFIG_DISCONTIGMEM)   += discontig.o
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
index f508ccd8c3e9b1e2d28a67686583654d06fd5887..d850d279d5fea294f474b0ea33bb4cfc43967a20 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
@@ -248,8 +248,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 1) == 0.
         */
-       if (unlikely(address >= TASK_SIZE) ||
-           unlikely(address < (FIRST_USER_PGD_NR<<PGDIR_SHIFT))) { 
+       if (unlikely(address >= TASK_SIZE)) { 
                if (!(error_code & 5))
                        goto vmalloc_fault;
                /* 
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c
deleted file mode 100644
index b557429..0000000
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c
+++ /dev/null
@@ -1,70 +0,0 @@
-
-#include <linux/slab.h>
-#include <linux/version.h>
-#include <linux/mman.h>
-#include <linux/init.h>
-#include <asm/pgalloc.h>
-
-unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long start_addr;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (((TASK_SIZE - len) >= addr) &&
-                   (addr >= (FIRST_USER_PGD_NR<<PGDIR_SHIFT)) &&
-                   (!vma || ((addr + len) <= vma->vm_start)))
-                       return addr;
-       }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-       start_addr = addr = mm->free_area_cache;
-#else
-       start_addr = addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
-#endif
-
-full_search:
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr) {
-                       /*
-                        * Start a new search - just in case we missed
-                        * some holes.
-                        */
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = addr = TASK_UNMAPPED_BASE;
-                               goto full_search;
-                       }
-                       return -ENOMEM;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Remember the place where we stopped the search:
-                        */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-                       mm->free_area_cache = addr + len;
-#endif
-                       return addr;
-               }
-               addr = vma->vm_end;
-       }
-}
-
-unsigned long
-arch_check_fixed_mapping(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       if (addr < (FIRST_USER_PGD_NR<<PGDIR_SHIFT)) {
-               printk(KERN_ALERT "WARNING: Preventing a mmap() request by %s at 0x%08lx, len %08lx\n",
-               current->comm, addr, len);
-               return -EINVAL;
-       }
-       return 0;
-}
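
Removing arch_check_fixed_mapping() (together with its hook in the 2.4 get_unmapped_area() deleted above) is the user-visible half of the VESA BIOS fix: the check refused every MAP_FIXED request below FIRST_USER_PGD_NR<<PGDIR_SHIFT = 4MB with -EINVAL. A made-up userspace example of the kind of request that was being broken -- roughly what an X server does to reach the legacy video BIOS through /dev/mem:

    #include <fcntl.h>
    #include <sys/mman.h>

    /* Illustrative only: map the legacy video BIOS at its physical
     * address, so real-mode code and tables are visible where they
     * expect to be.  Under the removed check, any MAP_FIXED address
     * below 4MB -- such as this one -- came back -EINVAL. */
    static void *map_video_bios(void)
    {
        int fd = open("/dev/mem", O_RDWR | O_SYNC);
        if (fd < 0)
            return MAP_FAILED;
        return mmap((void *)0xC0000, 0x40000, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, fd, 0xC0000);
    }
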
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
index 31c2fb0e5f76842e83777b7dced7b306a707c41a..e9e70e61b09bfe850fc7c6e0cf725c7838f08c74 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
@@ -258,21 +258,16 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);
 
-       memcpy((pgd_t *)pgd,
-                       swapper_pg_dir,
-                       FIRST_USER_PGD_NR * sizeof(pgd_t));
-       memcpy((pgd_t *)pgd + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       swapper_pg_dir + FIRST_USER_PGD_NR + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD -
-                        FIRST_USER_PGD_NR) * sizeof(pgd_t));
+       memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+                       swapper_pg_dir + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
        if (PTRS_PER_PMD > 1)
                goto out;
 
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
-       memset((pgd_t *)pgd + FIRST_USER_PGD_NR,
-                       0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
  out:
        __make_page_readonly(pgd);
        queue_pgd_pin(__pa(pgd));
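For orientation, a self-contained sketch of what the simplified constructor above now does: clear the user slots, then copy the kernel slots from the reference page directory. The constants are assumed typical i386 values, not taken from this patch:

    #include <string.h>

    #define PTRS_PER_PGD      1024   /* assumed: two-level i386 */
    #define USER_PTRS_PER_PGD  768   /* assumed: 3GB/1GB split  */

    typedef unsigned long pgd_t;     /* simplified stand-in     */

    static void init_pgd(pgd_t *pgd, const pgd_t *swapper_pg_dir)
    {
        /* User half starts empty... */
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        /* ...kernel half mirrors the reference page directory. */
        memcpy(pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    }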
index 3fc8d85ab18c9d64fa5b3e6d549f326f3204f033..ef9958e3b70bbe4dab2539c3534a41fbd3d6eb50 100644 (file)
@@ -58,13 +58,14 @@ static struct {
     unsigned long      id;
 } ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-static struct tq_struct ctrl_if_rxmsg_deferred_tq;
+/* For received messages that must be deferred to process context. */
+static void __ctrl_if_rxmsg_deferred(void *unused);
+static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
+                    __ctrl_if_rxmsg_deferred,
+                    NULL);
+
+/* Deferred callbacks for people waiting for space in the transmit ring. */
 static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
-#else
-static struct work_struct ctrl_if_rxmsg_deferred_work;
-static struct workqueue_struct *ctrl_if_tx_wq = NULL;
-#endif
 
 static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
 static void __ctrl_if_tx_tasklet(unsigned long data);
@@ -127,9 +128,7 @@ static void __ctrl_if_tx_tasklet(unsigned long data)
     if ( was_full && !TX_FULL(ctrl_if) )
     {
         wake_up(&ctrl_if_tx_wait);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
         run_task_queue(&ctrl_if_tx_tq);
-#endif
     }
 }
 
@@ -184,11 +183,7 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
     {
         wmb();
         ctrl_if_rxmsg_deferred_prod = dp;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-        schedule_task(&ctrl_if_rxmsg_deferred_tq);
-#else
         schedule_work(&ctrl_if_rxmsg_deferred_work);
-#endif
     }
 }
 
@@ -285,7 +280,7 @@ int ctrl_if_send_message_block(
     return rc;
 }
 
-int ctrl_if_enqueue_space_callback(struct work_struct *work)
+int ctrl_if_enqueue_space_callback(struct tq_struct *task)
 {
     control_if_t *ctrl_if = get_ctrl_if();
 
@@ -293,14 +288,7 @@ int ctrl_if_enqueue_space_callback(struct work_struct *work)
     if ( !TX_FULL(ctrl_if) )
         return 0;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-    (void)queue_task(work, &ctrl_if_tx_tq);
-#else
-    if ( ctrl_if_tx_wq )
-        (void)queue_work(ctrl_if_tx_wq, work);
-    else
-        return 1;
-#endif
+    (void)queue_task(task, &ctrl_if_tx_tq);
 
     /*
      * We may race execution of the task queue, so return re-checked status. If
@@ -439,13 +427,6 @@ void __init ctrl_if_init(void)
 
     for ( i = 0; i < 256; i++ )
         ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-    ctrl_if_rxmsg_deferred_tq.routine = __ctrl_if_rxmsg_deferred;
-#else
-    INIT_WORK(&ctrl_if_rxmsg_deferred_work,
-              (void *)__ctrl_if_rxmsg_deferred,
-              NULL);
-#endif
 
     spin_lock_init(&ctrl_if_lock);
 
@@ -457,11 +438,6 @@ void __init ctrl_if_init(void)
 static int __init ctrl_if_late_setup(void)
 {
     safe_to_schedule_task = 1;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    ctrl_if_tx_wq = create_workqueue("ctrl_if_tx");
-    if ( ctrl_if_tx_wq == NULL )
-        return 1;
-#endif
     return 0;
 }
 __initcall(ctrl_if_late_setup);
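A hedged usage sketch of the restored tq_struct interface; my_tx_retry and wait_for_tx_space are hypothetical names, and per the semantics above the callback must tolerate running even after a 0 return, since queueing can race with the ring draining:

    static void my_tx_retry(void *data);   /* hypothetical; must be idempotent */
    static struct tq_struct my_tx_task;    /* INIT_TQUEUE'd with my_tx_retry   */

    static void wait_for_tx_space(void)
    {
        if ( ctrl_if_enqueue_space_callback(&my_tx_task) == 0 )
            my_tx_retry(NULL);  /* ring has space now: proceed directly */
        /* Otherwise my_tx_retry runs from __ctrl_if_tx_tasklet's
         * run_task_queue() once the ring drains. */
    }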
index 2e35a50567bd1c52917979aacf9179241ff05e5a..50861e2b1dba1179d6567a7abe60369665efb78c 100644 (file)
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/mman.h>
 #include <asm/fixmap.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 
-#if 0
+#if 1
 #define ASSERT(_p) \
     if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
     __LINE__, __FILE__); *(int*)0=0; }
 #define TestSetPageLocked(_p) TryLockPage(_p)
 #define PageAnon(_p)          0 /* no equivalent in 2.4 */
 #define pte_offset_kernel     pte_offset
-extern int __vmalloc_area_pages(unsigned long address,
-                                unsigned long size,
-                                int gfp_mask,
-                                pgprot_t prot,
-                                struct page ***pages);
-#else
-static inline int __vmalloc_area_pages(unsigned long address,
-                                unsigned long size,
-                                int gfp_mask,
-                                pgprot_t prot,
-                                struct page ***pages)
-{
-    struct vm_struct vma;
-    vma.addr = (void *)address;
-    vma.size = size + PAGE_SIZE; /* retarded interface */
-    return map_vm_area(&vma, prot, pages);
-}
+#define remap_page_range(_a,_b,_c,_d,_e) remap_page_range(_b,_c,_d,_e)
+#define daemonize(_n)                   \
+    do {                                \
+        daemonize();                    \
+        strcpy(current->comm, _n);      \
+        sigfillset(&current->blocked);  \
+    } while ( 0 )
 #endif
 
 static unsigned char *fixup_buf;
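The 2.4 compatibility macros above use a classic shimming trick: redefine the 2.6-style name so shared code compiles against the older API underneath (the daemonize macro even calls the real daemonize() inside its own body, relying on the preprocessor's rule that a macro is never expanded recursively). A toy, self-contained illustration; every name here is a stand-in, not kernel code:

    #include <stdio.h>
    #include <string.h>

    static char comm[16];
    static void old_daemonize(void) { /* 2.4 form: takes no name */ }

    /* Give old_daemonize a 2.6-style one-argument spelling. */
    #define daemonize(_n)                           \
        do {                                        \
            old_daemonize();                        \
            strncpy(comm, (_n), sizeof(comm) - 1);  \
        } while (0)

    int main(void)
    {
        daemonize("segfixup");      /* 2.6-style call site */
        printf("%s\n", comm);       /* prints: segfixup    */
        return 0;
    }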
@@ -235,6 +226,64 @@ static unsigned int parse_insn(unsigned char *insn,
     return ((pb - insn) + 1 + (d & INSN_SUFFIX_BYTES));
 }
 
+#define SUCCESS 1
+#define FAIL    0
+static int map_fixup_buf(struct mm_struct *mm)
+{
+    struct vm_area_struct *vma;
+
+    /* Already mapped? This is a pretty safe check. */
+    if ( ((vma = find_vma(current->mm, FIXUP_BUF_USER)) != NULL) &&
+         (vma->vm_start <= FIXUP_BUF_USER) &&
+         (vma->vm_flags == (VM_READ | VM_MAYREAD | VM_RESERVED)) &&
+         (vma->vm_file == NULL) )
+        return SUCCESS;
+
+    if ( (vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL)) == NULL )
+    {
+        DPRINTK("Cannot allocate VMA.");
+        return FAIL;
+    }
+
+    memset(vma, 0, sizeof(*vma));
+
+    vma->vm_mm        = mm;
+    vma->vm_flags     = VM_READ | VM_MAYREAD | VM_RESERVED;
+    vma->vm_page_prot = PAGE_READONLY;
+
+    down_write(&mm->mmap_sem);
+
+    vma->vm_start = get_unmapped_area(
+        NULL, FIXUP_BUF_USER, FIXUP_BUF_SIZE,
+        0, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED);
+    if ( vma->vm_start != FIXUP_BUF_USER )
+    {
+        DPRINTK("Cannot allocate low-memory-region VMA.");
+        up_write(&mm->mmap_sem);
+        kmem_cache_free(vm_area_cachep, vma);
+        return FAIL;
+    }
+
+    vma->vm_end = vma->vm_start + FIXUP_BUF_SIZE;
+
+    if ( remap_page_range(vma, vma->vm_start, __pa(fixup_buf), 
+                          vma->vm_end - vma->vm_start, vma->vm_page_prot) )
+    {
+        DPRINTK("Cannot map low-memory-region VMA.");
+        up_write(&mm->mmap_sem);
+        kmem_cache_free(vm_area_cachep, vma);
+        return FAIL;
+    }
+
+    insert_vm_struct(mm, vma);
+    
+    mm->total_vm += FIXUP_BUF_SIZE >> PAGE_SHIFT;
+
+    up_write(&mm->mmap_sem);
+
+    return SUCCESS;
+}
+
 /*
  * Mainly this function checks that our patches can't erroneously get flushed
  * to a file on disc, which would screw us after reboot!
@@ -251,7 +300,8 @@ static int safe_to_patch(struct mm_struct *mm, unsigned long addr)
     if ( addr <= (FIXUP_BUF_USER + FIXUP_BUF_SIZE) )
         return SUCCESS;
 
-    if ( (vma = find_vma(current->mm, addr)) == NULL )
+    if ( ((vma = find_vma(current->mm, addr)) == NULL) ||
+         (vma->vm_start > addr) )
     {
         DPRINTK("No VMA contains fault address.");
         return FAIL;
@@ -314,6 +364,9 @@ asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
         return;
     }
 
+    if ( unlikely(!map_fixup_buf(mm)) )
+        goto out;
+
     /* Hold the mmap_sem to prevent the mapping from disappearing under us. */
     down_read(&mm->mmap_sem);
 
@@ -669,8 +722,14 @@ asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
 
     /* Find the physical page that is to be patched. */
     pgd = pgd_offset(current->mm, eip);
+    if ( unlikely(!pgd_present(*pgd)) )
+        goto unlock_and_out;
     pmd = pmd_offset(pgd, eip);
+    if ( unlikely(!pmd_present(*pmd)) )
+        goto unlock_and_out;
     pte = pte_offset_kernel(pmd, eip);
+    if ( unlikely(!pte_present(*pte)) )
+        goto unlock_and_out;
     page = pte_page(*pte);
 
     /*
@@ -680,8 +739,7 @@ asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
     if ( unlikely(TestSetPageLocked(page)) )
     {
         DPRINTK("Page is locked.");
-        spin_unlock(&mm->page_table_lock);
-        goto out;
+        goto unlock_and_out;
     }
 
     /*
@@ -692,8 +750,7 @@ asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
     {
         DPRINTK("Page is dirty or anonymous.");
         unlock_page(page);
-        spin_unlock(&mm->page_table_lock);
-        goto out;
+        goto unlock_and_out;
     }
 
     veip = kmap(page);
@@ -709,30 +766,43 @@ asmlinkage void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
 
  out:
     up_read(&mm->mmap_sem);
+    return;
+
+ unlock_and_out:
+    spin_unlock(&mm->page_table_lock);
+    up_read(&mm->mmap_sem);
+    return;
+}
+
+static int fixup_thread(void *unused)
+{
+    daemonize("segfixup");
+    
+    for ( ; ; )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule();
+    }
 }
 
 static int nosegfixup = 0;
 
 static int __init fixup_init(void)
 {
-    struct page *_pages[1<<FIXUP_BUF_ORDER], **pages=_pages;
     int i;
 
-    if ( nosegfixup )
-        return 0;
-
-    HYPERVISOR_vm_assist(VMASST_CMD_enable,
-                         VMASST_TYPE_4gb_segments_notify);
-
-    fixup_buf = (char *)__get_free_pages(GFP_ATOMIC, FIXUP_BUF_ORDER);
-    for ( i = 0; i < (1<<FIXUP_BUF_ORDER); i++ )
-        _pages[i] = virt_to_page(fixup_buf) + i;
+    nosegfixup = 1; /* XXX */
 
-    if ( __vmalloc_area_pages(FIXUP_BUF_USER, FIXUP_BUF_SIZE, 
-                              0, PAGE_READONLY, &pages) != 0 )
-        BUG();
-
-    memset(fixup_hash, 0, sizeof(fixup_hash));
+    if ( !nosegfixup )
+    {
+        HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                             VMASST_TYPE_4gb_segments_notify);
+        fixup_buf = (char *)__get_free_pages(GFP_ATOMIC, FIXUP_BUF_ORDER);
+        for ( i = 0; i < (1 << FIXUP_BUF_ORDER); i++ )
+            SetPageReserved(virt_to_page(fixup_buf) + i);
+        memset(fixup_hash, 0, sizeof(fixup_hash));
+        (void)kernel_thread(fixup_thread, NULL, CLONE_FS | CLONE_FILES);
+    }
 
     return 0;
 }
index a3d46cb23197c7361ed93f7ca71e32f29a208dd1..83fec948578c31be7429fc7460ff2b8518b49094 100644 (file)
@@ -14,10 +14,7 @@ static int errno;
 #include <asm-xen/hypervisor.h>
 #include <asm-xen/hypervisor-ifs/dom0_ops.h>
 #include <asm-xen/suspend.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-int reboot_thru_bios = 0;      /* for dmi_scan.c */
-#endif
+#include <asm-xen/queues.h>
 
 void machine_restart(char * __unused)
 {
@@ -27,19 +24,11 @@ void machine_restart(char * __unused)
        HYPERVISOR_reboot();
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(machine_restart);
-#endif
-
 void machine_halt(void)
 {
        machine_power_off();
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(machine_halt);
-#endif
-
 void machine_power_off(void)
 {
        /* We really want to get pending console data out before we die. */
@@ -49,11 +38,13 @@ void machine_power_off(void)
 }
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+int reboot_thru_bios = 0;      /* for dmi_scan.c */
+EXPORT_SYMBOL(machine_restart);
+EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_power_off);
 #endif
 
 
-
 /******************************************************************************
  * Stop/pickle callback handling.
  */
@@ -65,7 +56,9 @@ static int shutting_down = -1;
 
 static void __do_suspend(void)
 {
-    int i,j;
+    int i, j;
+    suspend_record_t *suspend_record;
+
     /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
     extern void blkdev_suspend(void);
     extern void blkdev_resume(void);
@@ -76,10 +69,8 @@ static void __do_suspend(void)
     extern unsigned long max_pfn;
     extern unsigned long *pfn_to_mfn_frame_list;
 
-    suspend_record_t *suspend_record     = NULL;
-
-    if ( (suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL))
-         == NULL )
+    suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
+    if ( suspend_record == NULL )
         goto out;
 
     suspend_record->nr_pfns = max_pfn; /* final number of pfns */
@@ -205,11 +196,7 @@ static void __shutdown_handler(void *unused)
 
 static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-#else
-    static struct tq_struct shutdown_tq;
-#endif
 
     if ( (shutting_down == -1) &&
          ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
@@ -217,12 +204,7 @@ static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
           (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
     {
         shutting_down = msg->subtype;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
         schedule_work(&shutdown_work);
-#else
-        shutdown_tq.routine = __shutdown_handler;
-        schedule_task(&shutdown_tq);
-#endif
     }
     else
     {
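Since <asm-xen/queues.h> now supplies work queues on both kernels, one statically initialized work item covers both branches. A minimal sketch of the pattern with hypothetical names, using the three-argument DECLARE_WORK form this patch uses throughout:

    static void my_handler(void *unused);             /* hypothetical */
    static DECLARE_WORK(my_work, my_handler, NULL);

    static void my_msg_handler(void)
    {
        schedule_work(&my_work);   /* safe from IRQ context; my_handler
                                    * runs later in process context.   */
    }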
index 1fbfebb4cc9234c14d9e1141b2e16ecb01e27b21..7942b12b6017f151dd486db0f465b09799e785ea 100644 (file)
@@ -74,16 +74,12 @@ static void __xencons_tx_flush(void);
 /* This task is used to defer sending console data until there is space. */
 static void xencons_tx_flush_task_routine(void *data);
 
+static struct tq_struct xencons_tx_flush_task;
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 static struct tty_driver *xencons_driver;
-static DECLARE_WORK(xencons_tx_flush_task,
-                    xencons_tx_flush_task_routine,
-                    NULL);
 #else
 static struct tty_driver xencons_driver;
-static struct tq_struct xencons_tx_flush_task = {
-    routine: xencons_tx_flush_task_routine
-};
 #endif
 
 
@@ -635,6 +631,8 @@ static int __init xencons_init(void)
     if ( xc_mode == XC_OFF )
         return 0;
 
+    INIT_TQUEUE(&xencons_tx_flush_task, xencons_tx_flush_task_routine, NULL);
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
                                       1 : MAX_NR_CONSOLES);
index b12547a8138c529a62b2f533abc983373b5c13d7..e1c2031a717aec670de5f7297807625da7746b51 100644 (file)
@@ -64,17 +64,16 @@ void paging_init(void);
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
-#define FIRST_USER_PGD_NR      1
-#define USER_PTRS_PER_PGD      ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR      0
 
-#if 0 /* XEN */
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
 
 #define TWOLEVEL_PGDIR_SHIFT   22
 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
 #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-#endif
+
 
 #ifndef __ASSEMBLY__
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -462,7 +461,4 @@ static inline unsigned long arbitrary_virt_to_phys(void *va)
 #define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
 
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_CHECK_FIXED_MAPPING
-
 #endif /* _I386_PGTABLE_H */
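A hedged worked example of what the new definitions evaluate to, assuming the usual i386 constants (a 3GB/1GB split and two-level paging); these values are assumptions, not read from this patch:

    #include <stdio.h>

    #define TASK_SIZE   0xC0000000UL   /* assumed 3GB user space  */
    #define PGDIR_SHIFT 22             /* assumed two-level i386  */
    #define PGDIR_SIZE  (1UL << PGDIR_SHIFT)

    int main(void)
    {
        /* USER_PTRS_PER_PGD = TASK_SIZE/PGDIR_SIZE = 0xC0000000/0x400000 */
        printf("USER_PTRS_PER_PGD = %lu\n", TASK_SIZE / PGDIR_SIZE); /* 768 */
        /* FIRST_USER_PGD_NR = 0: user slots now start at PGD entry 0,
         * which is what lets the arch mmap hooks above be deleted.   */
        return 0;
    }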
index 09fac29b5fb7592ec33e9235afc2f463ff6682f0..c90e010a03516c3b14b6683449958fa623f7e5dd 100644 (file)
 #define __ASM_XEN__CTRL_IF_H__
 
 #include <asm-xen/hypervisor.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#include <linux/tqueue.h>
-#define work_struct tq_struct
-#endif
+#include <asm-xen/queues.h>
 
 typedef control_msg_t ctrl_msg_t;
 
@@ -69,7 +65,7 @@ int ctrl_if_send_message_block(
  * still be executed. If this function returns 1 then the callback /will/ be
  * executed when space becomes available.
  */
-int ctrl_if_enqueue_space_callback(struct work_struct *task);
+int ctrl_if_enqueue_space_callback(struct tq_struct *task);
 
 /*
  * Send a response (@msg) to a message from the domain controller. This will 
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/queues.h b/linux-2.6.7-xen-sparse/include/asm-xen/queues.h
new file mode 100644 (file)
index 0000000..f2a66a7
--- /dev/null
@@ -0,0 +1,75 @@
+
+/*
+ * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
+ * queues. Unfortunately the semantics is not the same. With task queues we 
+ * can defer work until a particular event occurs -- this is not
+ * straightforwardly done with work queues (queued work is performed asap, or
+ * after some fixed timeout). Conversely, work queues are a (slightly) neater
+ * way of deferring work to a process context than using task queues in 2.4.
+ * 
+ * So, what we do here is a bit weird:
+ *  1. On 2.4, we emulate work queues over task queues.
+ *  2. On 2.6, we emulate task queues over work queues.
+ * 
+ * Note how much harder the latter is. :-)
+ */
+
+#ifndef __QUEUES_H__
+#define __QUEUES_H__
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+struct tq_struct { 
+    struct work_struct work;
+    struct list_head   list;
+    unsigned long      pending;
+};
+#define INIT_TQUEUE(_name, _fn, _arg)               \
+    do {                                            \
+        INIT_LIST_HEAD(&(_name)->list);             \
+        (_name)->pending = 0;                       \
+        INIT_WORK(&(_name)->work, (_fn), (_arg));   \
+    } while ( 0 )
+
+typedef struct {
+    struct list_head list;
+    spinlock_t       lock;
+} task_queue;
+#define DECLARE_TASK_QUEUE(_name) \
+    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
+
+static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
+{
+    unsigned long flags;
+    if ( test_and_set_bit(0, &tqe->pending) )
+        return 0;
+    spin_lock_irqsave(&tql->lock, flags);
+    list_add_tail(&tqe->list, &tql->list);
+    spin_unlock_irqrestore(&tql->lock, flags);
+    return 1;
+}
+
+static inline void run_task_queue(task_queue *tql)
+{
+    struct list_head head, *ent;
+    struct tq_struct *tqe;
+    unsigned long flags;
+
+    spin_lock_irqsave(&tql->lock, flags);
+    list_add(&head, &tql->list);
+    list_del_init(&tql->list);
+    spin_unlock_irqrestore(&tql->lock, flags);
+
+    while ( !list_empty(&head) )
+    {
+        ent = head.next;
+        list_del_init(ent);
+        tqe = list_entry(ent, struct tq_struct, list);
+        wmb(); tqe->pending = 0;
+        schedule_work(&tqe->work);
+    }
+}
+
+#endif /* __QUEUES_H__ */
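A hedged usage sketch for the emulation above (the my_* names are hypothetical): queue a task while an event is awaited, then flush the queue when the event fires. On 2.6 each flushed entry is handed to schedule_work(), so the routines still run later in process context.

    static void my_routine(void *arg);       /* hypothetical callback */
    static struct tq_struct my_task;
    static DECLARE_TASK_QUEUE(my_tq);

    static void my_init(void)
    {
        INIT_TQUEUE(&my_task, my_routine, NULL);
    }

    static void on_event_wanted(void)
    {
        queue_task(&my_task, &my_tq);    /* no-op if already pending */
    }

    static void on_event_fired(void)
    {
        run_task_queue(&my_tq);          /* defers to process context */
    }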
diff --git a/linux-2.6.7-xen-sparse/mm/mmap.c b/linux-2.6.7-xen-sparse/mm/mmap.c
deleted file mode 100644 (file)
index f824336..0000000
+++ /dev/null
@@ -1,1816 +0,0 @@
-/*
- * mm/mmap.c
- *
- * Written by obz.
- *
- * Address space accounting code       <alan@redhat.com>
- */
-
-#include <linux/slab.h>
-#include <linux/shm.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <linux/syscalls.h>
-#include <linux/init.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/personality.h>
-#include <linux/security.h>
-#include <linux/hugetlb.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/mempolicy.h>
-#include <linux/rmap.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/tlb.h>
-
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware.  The expected
- * behavior is in parens:
- *
- * map_type    prot
- *             PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
- * MAP_SHARED  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
- *             w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
- *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *             
- * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
- *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
- *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *
- */
-pgprot_t protection_map[16] = {
-       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
-int sysctl_overcommit_memory = 0;      /* default is heuristic overcommit */
-int sysctl_overcommit_ratio = 50;      /* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
-
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
-
-/*
- * Requires inode->i_mapping->i_mmap_lock
- */
-static void __remove_shared_vm_struct(struct vm_area_struct *vma,
-               struct file *file, struct address_space *mapping)
-{
-       if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_dentry->d_inode->i_writecount);
-       if (vma->vm_flags & VM_SHARED)
-               mapping->i_mmap_writable--;
-
-       flush_dcache_mmap_lock(mapping);
-       if (unlikely(vma->vm_flags & VM_NONLINEAR))
-               list_del_init(&vma->shared.vm_set.list);
-       else
-               vma_prio_tree_remove(vma, &mapping->i_mmap);
-       flush_dcache_mmap_unlock(mapping);
-}
-
-/*
- * Remove one vm structure and free it.
- */
-static void remove_vm_struct(struct vm_area_struct *vma)
-{
-       struct file *file = vma->vm_file;
-
-       if (file) {
-               struct address_space *mapping = file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
-               __remove_shared_vm_struct(vma, file, mapping);
-               spin_unlock(&mapping->i_mmap_lock);
-       }
-       if (vma->vm_ops && vma->vm_ops->close)
-               vma->vm_ops->close(vma);
-       if (file)
-               fput(file);
-       anon_vma_unlink(vma);
-       mpol_free(vma_policy(vma));
-       kmem_cache_free(vm_area_cachep, vma);
-}
-
-/*
- *  sys_brk() for the most part doesn't need the global kernel
- *  lock, except when an application is doing something nasty
- *  like trying to un-brk an area that has already been mapped
- *  to a regular file.  in this case, the unmapping will need
- *  to invoke file system routines that need the global lock.
- */
-asmlinkage unsigned long sys_brk(unsigned long brk)
-{
-       unsigned long rlim, retval;
-       unsigned long newbrk, oldbrk;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-
-       if (brk < mm->end_code)
-               goto out;
-       newbrk = PAGE_ALIGN(brk);
-       oldbrk = PAGE_ALIGN(mm->brk);
-       if (oldbrk == newbrk)
-               goto set_brk;
-
-       /* Always allow shrinking brk. */
-       if (brk <= mm->brk) {
-               if (!do_munmap(mm, newbrk, oldbrk-newbrk))
-                       goto set_brk;
-               goto out;
-       }
-
-       /* Check against rlimit.. */
-       rlim = current->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
-       /* Check against existing mmap mappings. */
-       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
-               goto out;
-
-       /* Ok, looks good - let it rip. */
-       if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
-               goto out;
-set_brk:
-       mm->brk = brk;
-out:
-       retval = mm->brk;
-       up_write(&mm->mmap_sem);
-       return retval;
-}
-
-#ifdef DEBUG_MM_RB
-static int browse_rb(struct rb_root *root)
-{
-       int i = 0, j;
-       struct rb_node *nd, *pn = NULL;
-       unsigned long prev = 0, pend = 0;
-
-       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
-               struct vm_area_struct *vma;
-               vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               if (vma->vm_start < prev)
-                       printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
-               if (vma->vm_start < pend)
-                       printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
-               if (vma->vm_start > vma->vm_end)
-                       printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
-               i++;
-               pn = nd;
-       }
-       j = 0;
-       for (nd = pn; nd; nd = rb_prev(nd)) {
-               j++;
-       }
-       if (i != j)
-               printk("backwards %d, forwards %d\n", j, i), i = 0;
-       return i;
-}
-
-void validate_mm(struct mm_struct *mm)
-{
-       int bug = 0;
-       int i = 0;
-       struct vm_area_struct *tmp = mm->mmap;
-       while (tmp) {
-               tmp = tmp->vm_next;
-               i++;
-       }
-       if (i != mm->map_count)
-               printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
-       i = browse_rb(&mm->mm_rb);
-       if (i != mm->map_count)
-               printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-       if (bug)
-               BUG();
-}
-#else
-#define validate_mm(mm) do { } while (0)
-#endif
-
-static struct vm_area_struct *
-find_vma_prepare(struct mm_struct *mm, unsigned long addr,
-               struct vm_area_struct **pprev, struct rb_node ***rb_link,
-               struct rb_node ** rb_parent)
-{
-       struct vm_area_struct * vma;
-       struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
-
-       __rb_link = &mm->mm_rb.rb_node;
-       rb_prev = __rb_parent = NULL;
-       vma = NULL;
-
-       while (*__rb_link) {
-               struct vm_area_struct *vma_tmp;
-
-               __rb_parent = *__rb_link;
-               vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
-
-               if (vma_tmp->vm_end > addr) {
-                       vma = vma_tmp;
-                       if (vma_tmp->vm_start <= addr)
-                               return vma;
-                       __rb_link = &__rb_parent->rb_left;
-               } else {
-                       rb_prev = __rb_parent;
-                       __rb_link = &__rb_parent->rb_right;
-               }
-       }
-
-       *pprev = NULL;
-       if (rb_prev)
-               *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
-       *rb_link = __rb_link;
-       *rb_parent = __rb_parent;
-       return vma;
-}
-
-static inline void
-__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct vm_area_struct *prev, struct rb_node *rb_parent)
-{
-       if (prev) {
-               vma->vm_next = prev->vm_next;
-               prev->vm_next = vma;
-       } else {
-               mm->mmap = vma;
-               if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
-                                       struct vm_area_struct, vm_rb);
-               else
-                       vma->vm_next = NULL;
-       }
-}
-
-void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct rb_node **rb_link, struct rb_node *rb_parent)
-{
-       rb_link_node(&vma->vm_rb, rb_parent, rb_link);
-       rb_insert_color(&vma->vm_rb, &mm->mm_rb);
-}
-
-static inline void __vma_link_file(struct vm_area_struct *vma)
-{
-       struct file * file;
-
-       file = vma->vm_file;
-       if (file) {
-               struct address_space *mapping = file->f_mapping;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_dentry->d_inode->i_writecount);
-               if (vma->vm_flags & VM_SHARED)
-                       mapping->i_mmap_writable++;
-
-               flush_dcache_mmap_lock(mapping);
-               if (unlikely(vma->vm_flags & VM_NONLINEAR))
-                       list_add_tail(&vma->shared.vm_set.list,
-                                       &mapping->i_mmap_nonlinear);
-               else
-                       vma_prio_tree_insert(vma, &mapping->i_mmap);
-               flush_dcache_mmap_unlock(mapping);
-       }
-}
-
-static void
-__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
-       struct vm_area_struct *prev, struct rb_node **rb_link,
-       struct rb_node *rb_parent)
-{
-       __vma_link_list(mm, vma, prev, rb_parent);
-       __vma_link_rb(mm, vma, rb_link, rb_parent);
-       __anon_vma_link(vma);
-}
-
-static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
-                       struct vm_area_struct *prev, struct rb_node **rb_link,
-                       struct rb_node *rb_parent)
-{
-       struct address_space *mapping = NULL;
-
-       if (vma->vm_file)
-               mapping = vma->vm_file->f_mapping;
-
-       if (mapping)
-               spin_lock(&mapping->i_mmap_lock);
-       anon_vma_lock(vma);
-
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
-       __vma_link_file(vma);
-
-       anon_vma_unlock(vma);
-       if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
-
-       mark_mm_hugetlb(mm, vma);
-       mm->map_count++;
-       validate_mm(mm);
-}
-
-/*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
- */
-static void
-__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
-{
-       struct vm_area_struct * __vma, * prev;
-       struct rb_node ** rb_link, * rb_parent;
-
-       __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
-       mm->map_count++;
-}
-
-static inline void
-__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct vm_area_struct *prev)
-{
-       prev->vm_next = vma->vm_next;
-       rb_erase(&vma->vm_rb, &mm->mm_rb);
-       if (mm->mmap_cache == vma)
-               mm->mmap_cache = prev;
-}
-
-/*
- * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
- * is already present in an i_mmap tree without adjusting the tree.
- * The following helper function should be used when such adjustments
- * are necessary.  The "insert" vma (if any) is to be inserted
- * before we drop the necessary locks.
- */
-void vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *next = vma->vm_next;
-       struct address_space *mapping = NULL;
-       struct prio_tree_root *root = NULL;
-       struct file *file = vma->vm_file;
-       struct anon_vma *anon_vma = NULL;
-       long adjust_next = 0;
-       int remove_next = 0;
-
-       if (next && !insert) {
-               if (end >= next->vm_end) {
-                       /*
-                        * vma expands, overlapping all the next, and
-                        * perhaps the one after too (mprotect case 6).
-                        */
-again:                 remove_next = 1 + (end > next->vm_end);
-                       end = next->vm_end;
-                       anon_vma = next->anon_vma;
-               } else if (end > next->vm_start) {
-                       /*
-                        * vma expands, overlapping part of the next:
-                        * mprotect case 5 shifting the boundary up.
-                        */
-                       adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
-                       anon_vma = next->anon_vma;
-               } else if (end < vma->vm_end) {
-                       /*
-                        * vma shrinks, and !insert tells it's not
-                        * split_vma inserting another: so it must be
-                        * mprotect case 4 shifting the boundary down.
-                        */
-                       adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
-                       anon_vma = next->anon_vma;
-               }
-       }
-
-       if (file) {
-               mapping = file->f_mapping;
-               if (!(vma->vm_flags & VM_NONLINEAR))
-                       root = &mapping->i_mmap;
-               spin_lock(&mapping->i_mmap_lock);
-               if (insert) {
-                       /*
-                        * Put into prio_tree now, so instantiated pages
-                        * are visible to arm/parisc __flush_dcache_page
-                        * throughout; but we cannot insert into address
-                        * space until vma start or end is updated.
-                        */
-                       __vma_link_file(insert);
-               }
-       }
-
-       /*
-        * When changing only vma->vm_end, we don't really need
-        * anon_vma lock: but is that case worth optimizing out?
-        */
-       if (vma->anon_vma)
-               anon_vma = vma->anon_vma;
-       if (anon_vma)
-               spin_lock(&anon_vma->lock);
-
-       if (root) {
-               flush_dcache_mmap_lock(mapping);
-               vma_prio_tree_remove(vma, root);
-               if (adjust_next)
-                       vma_prio_tree_remove(next, root);
-       }
-
-       vma->vm_start = start;
-       vma->vm_end = end;
-       vma->vm_pgoff = pgoff;
-       if (adjust_next) {
-               next->vm_start += adjust_next << PAGE_SHIFT;
-               next->vm_pgoff += adjust_next;
-       }
-
-       if (root) {
-               if (adjust_next) {
-                       vma_prio_tree_init(next);
-                       vma_prio_tree_insert(next, root);
-               }
-               vma_prio_tree_init(vma);
-               vma_prio_tree_insert(vma, root);
-               flush_dcache_mmap_unlock(mapping);
-       }
-
-       if (remove_next) {
-               /*
-                * vma_merge has merged next into vma, and needs
-                * us to remove next before dropping the locks.
-                */
-               __vma_unlink(mm, next, vma);
-               if (file)
-                       __remove_shared_vm_struct(next, file, mapping);
-               if (next->anon_vma)
-                       __anon_vma_merge(vma, next);
-       } else if (insert) {
-               /*
-                * split_vma has split insert from vma, and needs
-                * us to insert it before dropping the locks
-                * (it may either follow vma or precede it).
-                */
-               __insert_vm_struct(mm, insert);
-       }
-
-       if (anon_vma)
-               spin_unlock(&anon_vma->lock);
-       if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
-
-       if (remove_next) {
-               if (file)
-                       fput(file);
-               mm->map_count--;
-               mpol_free(vma_policy(next));
-               kmem_cache_free(vm_area_cachep, next);
-               /*
-                * In mprotect's case 6 (see comments on vma_merge),
-                * we must remove another next too. It would clutter
-                * up the code too much to do both in one go.
-                */
-               if (remove_next == 2) {
-                       next = vma->vm_next;
-                       goto again;
-               }
-       }
-
-       validate_mm(mm);
-}
-
-/*
- * If the vma has a ->close operation then the driver probably needs to release
- * per-vma resources, so we don't attempt to merge those.
- */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
-
-static inline int is_mergeable_vma(struct vm_area_struct *vma,
-                       struct file *file, unsigned long vm_flags)
-{
-       if (vma->vm_flags != vm_flags)
-               return 0;
-       if (vma->vm_file != file)
-               return 0;
-       if (vma->vm_ops && vma->vm_ops->close)
-               return 0;
-       return 1;
-}
-
-static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
-                                       struct anon_vma *anon_vma2)
-{
-       return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * in front of (at a lower virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- *
- * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
- * wrap, nor mmaps which cover the final page at index -1UL.
- */
-static int
-can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
-{
-       if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
-               if (vma->vm_pgoff == vm_pgoff)
-                       return 1;
-       }
-       return 0;
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * beyond (at a higher virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- */
-static int
-can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
-{
-       if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
-               pgoff_t vm_pglen;
-               vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-               if (vma->vm_pgoff + vm_pglen == vm_pgoff)
-                       return 1;
-       }
-       return 0;
-}
-
-/*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
- *
- * In most cases - when called for mmap, brk or mremap - [addr,end) is
- * certain not to be mapped by the time vma_merge is called; but when
- * called for mprotect, it is certain to be already mapped (either at
- * an offset within prev, or at the start of next), and the flags of
- * this area are about to be changed to vm_flags - and the no-change
- * case has already been eliminated.
- *
- * The following mprotect cases have to be considered, where AAAA is
- * the area passed down from mprotect_fixup, never extending beyond one
- * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
- *
- *     AAAA             AAAA                AAAA          AAAA
- *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
- *    cannot merge    might become    might become    might become
- *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
- *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
- *    mremap move:                                    PPPPNNNNNNNN 8
- *        AAAA
- *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
- *    might become    case 1 below    case 2 below    case 3 below
- *
- * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
- * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
- */
-struct vm_area_struct *vma_merge(struct mm_struct *mm,
-                       struct vm_area_struct *prev, unsigned long addr,
-                       unsigned long end, unsigned long vm_flags,
-                       struct anon_vma *anon_vma, struct file *file,
-                       pgoff_t pgoff, struct mempolicy *policy)
-{
-       pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
-       struct vm_area_struct *area, *next;
-
-       /*
-        * We later require that vma->vm_flags == vm_flags,
-        * so this tests vma->vm_flags & VM_SPECIAL, too.
-        */
-       if (vm_flags & VM_SPECIAL)
-               return NULL;
-
-       if (prev)
-               next = prev->vm_next;
-       else
-               next = mm->mmap;
-       area = next;
-       if (next && next->vm_end == end)                /* cases 6, 7, 8 */
-               next = next->vm_next;
-
-       /*
-        * Can it merge with the predecessor?
-        */
-       if (prev && prev->vm_end == addr &&
-                       mpol_equal(vma_policy(prev), policy) &&
-                       can_vma_merge_after(prev, vm_flags,
-                                               anon_vma, file, pgoff)) {
-               /*
-                * OK, it can.  Can we now merge in the successor as well?
-                */
-               if (next && end == next->vm_start &&
-                               mpol_equal(policy, vma_policy(next)) &&
-                               can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen) &&
-                               is_mergeable_anon_vma(prev->anon_vma,
-                                                     next->anon_vma)) {
-                                                       /* cases 1, 6 */
-                       vma_adjust(prev, prev->vm_start,
-                               next->vm_end, prev->vm_pgoff, NULL);
-               } else                                  /* cases 2, 5, 7 */
-                       vma_adjust(prev, prev->vm_start,
-                               end, prev->vm_pgoff, NULL);
-               return prev;
-       }
-
-       /*
-        * Can this new request be merged in front of next?
-        */
-       if (next && end == next->vm_start &&
-                       mpol_equal(policy, vma_policy(next)) &&
-                       can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen)) {
-               if (prev && addr < prev->vm_end)        /* case 4 */
-                       vma_adjust(prev, prev->vm_start,
-                               addr, prev->vm_pgoff, NULL);
-               else                                    /* cases 3, 8 */
-                       vma_adjust(area, addr, next->vm_end,
-                               next->vm_pgoff - pglen, NULL);
-               return area;
-       }
-
-       return NULL;
-}
-
-/*
- * find_mergeable_anon_vma is used by anon_vma_prepare, to check
- * neighbouring vmas for a suitable anon_vma, before it goes off
- * to allocate a new anon_vma.  It checks because a repetitive
- * sequence of mprotects and faults may otherwise lead to distinct
- * anon_vmas being allocated, preventing vma merge in subsequent
- * mprotect.
- */
-struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
-{
-       struct vm_area_struct *near;
-       unsigned long vm_flags;
-
-       near = vma->vm_next;
-       if (!near)
-               goto try_prev;
-
-       /*
-        * Since only mprotect tries to remerge vmas, match flags
-        * which might be mprotected into each other later on.
-        * Neither mlock nor madvise tries to remerge at present,
-        * so leave their flags as obstructing a merge.
-        */
-       vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-       vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-       if (near->anon_vma && vma->vm_end == near->vm_start &&
-                       mpol_equal(vma_policy(vma), vma_policy(near)) &&
-                       can_vma_merge_before(near, vm_flags,
-                               NULL, vma->vm_file, vma->vm_pgoff +
-                               ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
-               return near->anon_vma;
-try_prev:
-       /*
-        * It is potentially slow to have to call find_vma_prev here.
-        * But it's only on the first write fault on the vma, not
-        * every time, and we could devise a way to avoid it later
-        * (e.g. stash info in next's anon_vma_node when assigning
-        * an anon_vma, or when trying vma_merge).  Another time.
-        */
-       if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-               BUG();
-       if (!near)
-               goto none;
-
-       vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-       vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-       if (near->anon_vma && near->vm_end == vma->vm_start &&
-                       mpol_equal(vma_policy(near), vma_policy(vma)) &&
-                       can_vma_merge_after(near, vm_flags,
-                               NULL, vma->vm_file, vma->vm_pgoff))
-               return near->anon_vma;
-none:
-       /*
-        * There's no absolute need to look only at touching neighbours:
-        * we could search further afield for "compatible" anon_vmas.
-        * But it would probably just be a waste of time searching,
-        * or lead to too many vmas hanging off the same anon_vma.
-        * We're trying to allow mprotect remerging later on,
-        * not trying to minimize memory used for anon_vmas.
-        */
-       return NULL;
-}
-
-/*
- * The caller must hold down_write(current->mm->mmap_sem).
- */
-
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
-                       unsigned long len, unsigned long prot,
-                       unsigned long flags, unsigned long pgoff)
-{
-       struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
-       struct inode *inode;
-       unsigned int vm_flags;
-       int correct_wcount = 0;
-       int error;
-       struct rb_node ** rb_link, * rb_parent;
-       int accountable = 1;
-       unsigned long charged = 0;
-
-       if (file) {
-               if (is_file_hugepages(file))
-                       accountable = 0;
-
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) &&
-                   (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
-
-       if (!len)
-               return addr;
-
-       /* Careful about overflows.. */
-       len = PAGE_ALIGN(len);
-       if (!len || len > TASK_SIZE)
-               return -EINVAL;
-
-       /* offset overflow? */
-       if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-               return -EINVAL;
-
-       /* Too many mappings? */
-       if (mm->map_count > sysctl_max_map_count)
-               return -ENOMEM;
-
-       /* Obtain the address to map to. we verify (or select) it and ensure
-        * that it represents a valid section of the address space.
-        */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
-       if (addr & ~PAGE_MASK)
-               return addr;
-
-       /* Do simple checking here so the lower-level routines won't have
-        * to. we assume access permissions have been handled by the open
-        * of the memory object, so we don't do any here.
-        */
-       vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
-                       mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
-       if (flags & MAP_LOCKED) {
-               if (!capable(CAP_IPC_LOCK))
-                       return -EPERM;
-               vm_flags |= VM_LOCKED;
-       }
-       /* mlock MCL_FUTURE? */
-       if (vm_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
-               locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
-                       return -EAGAIN;
-       }
-
-       inode = file ? file->f_dentry->d_inode : NULL;
-
-       if (file) {
-               switch (flags & MAP_TYPE) {
-               case MAP_SHARED:
-                       if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
-                               return -EACCES;
-
-                       /*
-                        * Make sure we don't allow writing to an append-only
-                        * file..
-                        */
-                       if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
-                               return -EACCES;
-
-                       /*
-                        * Make sure there are no mandatory locks on the file.
-                        */
-                       if (locks_verify_locked(inode))
-                               return -EAGAIN;
-
-                       vm_flags |= VM_SHARED | VM_MAYSHARE;
-                       if (!(file->f_mode & FMODE_WRITE))
-                               vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
-
-                       /* fall through */
-               case MAP_PRIVATE:
-                       if (!(file->f_mode & FMODE_READ))
-                               return -EACCES;
-                       break;
-
-               default:
-                       return -EINVAL;
-               }
-       } else {
-               switch (flags & MAP_TYPE) {
-               case MAP_SHARED:
-                       vm_flags |= VM_SHARED | VM_MAYSHARE;
-                       break;
-               case MAP_PRIVATE:
-                       /*
-                        * Set pgoff according to addr for anon_vma.
-                        */
-                       pgoff = addr >> PAGE_SHIFT;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-
-       error = security_file_mmap(file, prot, flags);
-       if (error)
-               return error;
-               
-       /* Clear old maps */
-       error = -ENOMEM;
-munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
-               if (do_munmap(mm, addr, len))
-                       return -ENOMEM;
-               goto munmap_back;
-       }
-
-       /* Check against address space limit. */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
-               return -ENOMEM;
-
-       if (accountable && (!(flags & MAP_NORESERVE) ||
-                       sysctl_overcommit_memory > 1)) {
-               if (vm_flags & VM_SHARED) {
-                       /* Check memory availability in shmem_file_setup? */
-                       vm_flags |= VM_ACCOUNT;
-               } else if (vm_flags & VM_WRITE) {
-                       /*
-                        * Private writable mapping: check memory availability
-                        */
-                       charged = len >> PAGE_SHIFT;
-                       if (security_vm_enough_memory(charged))
-                               return -ENOMEM;
-                       vm_flags |= VM_ACCOUNT;
-               }
-       }
-
-       /*
-        * Can we just expand an old private anonymous mapping?
-        * The VM_SHARED test is necessary because shmem_zero_setup
-        * will create the file object for a shared anonymous map below.
-        */
-       if (!file && !(vm_flags & VM_SHARED) &&
-           vma_merge(mm, prev, addr, addr + len, vm_flags,
-                                       NULL, NULL, pgoff, NULL))
-               goto out;
-
-       /*
-        * Determine the object being mapped and call the appropriate
-        * specific mapper. the address has already been validated, but
-        * not unmapped, but the maps are removed from the list.
-        */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma) {
-               error = -ENOMEM;
-               goto unacct_error;
-       }
-       memset(vma, 0, sizeof(*vma));
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-       vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags & 0x0f];
-       vma->vm_pgoff = pgoff;
-
-       if (file) {
-               error = -EINVAL;
-               if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
-                       goto free_vma;
-               if (vm_flags & VM_DENYWRITE) {
-                       error = deny_write_access(file);
-                       if (error)
-                               goto free_vma;
-                       correct_wcount = 1;
-               }
-               vma->vm_file = file;
-               get_file(file);
-               error = file->f_op->mmap(file, vma);
-               if (error)
-                       goto unmap_and_free_vma;
-       } else if (vm_flags & VM_SHARED) {
-               error = shmem_zero_setup(vma);
-               if (error)
-                       goto free_vma;
-       }
-
-       /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-        * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-        * that memory reservation must be checked; but that reservation
-        * belongs to shared memory object, not to vma: so now clear it.
-        */
-       if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-               vma->vm_flags &= ~VM_ACCOUNT;
-
-       /* Can addr have changed??
-        *
-        * Answer: Yes, several device drivers can do it in their
-        *         f_op->mmap method. -DaveM
-        */
-       addr = vma->vm_start;
-
-       if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
-                       vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               if (correct_wcount)
-                       atomic_inc(&inode->i_writecount);
-       } else {
-               if (file) {
-                       if (correct_wcount)
-                               atomic_inc(&inode->i_writecount);
-                       fput(file);
-               }
-               mpol_free(vma_policy(vma));
-               kmem_cache_free(vm_area_cachep, vma);
-       }
-out:   
-       mm->total_vm += len >> PAGE_SHIFT;
-       if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       if (flags & MAP_POPULATE) {
-               up_write(&mm->mmap_sem);
-               sys_remap_file_pages(addr, len, 0,
-                                       pgoff, flags & MAP_NONBLOCK);
-               down_write(&mm->mmap_sem);
-       }
-       return addr;
-
-unmap_and_free_vma:
-       if (correct_wcount)
-               atomic_inc(&inode->i_writecount);
-       vma->vm_file = NULL;
-       fput(file);
-
-       /* Undo any partial mapping done by a device driver. */
-       zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
-free_vma:
-       kmem_cache_free(vm_area_cachep, vma);
-unacct_error:
-       if (charged)
-               vm_unacct_memory(charged);
-       return error;
-}
-
-EXPORT_SYMBOL(do_mmap_pgoff);
-
-/* Get an address range which is currently unmapped.
- * For shmat() with addr=0.
- *
- * Ugly calling convention alert:
- * Return value with the low bits set means error value,
- * ie
- *     if (ret & ~PAGE_MASK)
- *             error = ret;
- *
- * This function "knows" that -ENOMEM has the bits set.
- */
-#ifndef HAVE_ARCH_UNMAPPED_AREA
-static inline unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long start_addr;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
-                       return addr;
-       }
-       start_addr = addr = mm->free_area_cache;
-
-full_search:
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr) {
-                       /*
-                        * Start a new search - just in case we missed
-                        * some holes.
-                        */
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = addr = TASK_UNMAPPED_BASE;
-                               goto full_search;
-                       }
-                       return -ENOMEM;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Remember the place where we stopped the search:
-                        */
-                       mm->free_area_cache = addr + len;
-                       return addr;
-               }
-               addr = vma->vm_end;
-       }
-}
-#else
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-                       unsigned long, unsigned long);
-#endif 
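
The search above is a linear first-fit walk: start at mm->free_area_cache,
take the first hole of at least len between successive vmas, and retry once
from TASK_UNMAPPED_BASE before giving up.  A self-contained sketch of the
same walk over a sorted array standing in for the vma list (all names here
are illustrative, and the restart-from-base retry is omitted):

    #include <stdio.h>

    struct region { unsigned long start, end; };   /* stand-in for a vma */

    /* First hole of at least `len` at or above `hint`, or 0 if nothing
     * fits below `limit`.  Mirrors the loop in arch_get_unmapped_area. */
    static unsigned long first_fit(const struct region *r, int n,
                                   unsigned long hint, unsigned long len,
                                   unsigned long limit)
    {
        unsigned long addr = hint;
        for (int i = 0; i < n; i++) {
            if (r[i].end <= addr)
                continue;                 /* region entirely below cursor */
            if (addr + len <= r[i].start)
                return addr;              /* hole before this region fits */
            addr = r[i].end;              /* jump past it and keep going  */
        }
        return (addr + len <= limit) ? addr : 0;  /* room above the last? */
    }

    int main(void)
    {
        struct region vmas[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };
        /* the hole [0x3000, 0x5000) takes the one-page request */
        printf("%#lx\n", first_fit(vmas, 2, 0x1000, 0x1000, 0x10000));
        return 0;
    }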
-
-#ifndef HAVE_ARCH_CHECK_FIXED_MAPPING
-#define arch_check_fixed_mapping(_file,_addr,_len,_pgoff,_flags) 0
-#else
-extern unsigned long
-arch_check_fixed_mapping(struct file *, unsigned long, unsigned long,
-                       unsigned long, unsigned long);
-#endif
-
-unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
-{
-       if (flags & MAP_FIXED) {
-               unsigned long ret;
-
-               if (addr > TASK_SIZE - len)
-                       return -ENOMEM;
-               if (addr & ~PAGE_MASK)
-                       return -EINVAL;
-               ret = arch_check_fixed_mapping(file, addr, len, pgoff, flags);
-               if (ret != 0)
-                       return ret;
-               if (file && is_file_hugepages(file))  {
-                       /*
-                        * Check if the given range is hugepage aligned, and
-                        * can be made suitable for hugepages.
-                        */
-                       ret = prepare_hugepage_range(addr, len);
-               } else {
-                       /*
-                        * Ensure that a normal request does not fall in a
-                        * reserved hugepage range.  For some archs like IA-64,
-                        * there is a separate region for hugepages.
-                        */
-                       ret = is_hugepage_only_range(addr, len);
-               }
-               if (ret)
-                       return -EINVAL;
-               return addr;
-       }
-
-       if (file && file->f_op && file->f_op->get_unmapped_area)
-               return file->f_op->get_unmapped_area(file, addr, len,
-                                               pgoff, flags);
-
-       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
-EXPORT_SYMBOL(get_unmapped_area);
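
The "low bits set" convention documented above works because page-aligned
addresses have all low bits clear, while small negative errnos reinterpreted
as unsigned longs have them set.  A quick demonstration, again assuming
4 KiB pages:

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_MASK (~0xfffUL)                 /* assumed 4 KiB pages */

    int main(void)
    {
        unsigned long ok  = 0x40000000UL;            /* aligned address */
        unsigned long err = (unsigned long)-ENOMEM;  /* 0xff...f4       */

        printf("ok  looks like an error? %d\n", (ok  & ~PAGE_MASK) != 0);
        printf("err looks like an error? %d\n", (err & ~PAGE_MASK) != 0);
        return 0;
    }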
-
-/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
-{
-       struct vm_area_struct *vma = NULL;
-
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
-               }
-       }
-       return vma;
-}
-
-EXPORT_SYMBOL(find_vma);
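
Note the contract: find_vma returns the first vma with vm_end > addr, which
may lie entirely above addr, so callers must still test vm_start <= addr for
containment.  The rbtree descent can be mimicked with a binary search over a
sorted array (illustrative types, not the kernel API):

    #include <stdio.h>

    struct region { unsigned long start, end; };

    /* First region with end > addr, or NULL: the find_vma contract. */
    static const struct region *find_region(const struct region *r, int n,
                                            unsigned long addr)
    {
        const struct region *best = NULL;
        int lo = 0, hi = n - 1;
        while (lo <= hi) {              /* binary search ~ rbtree descent */
            int mid = (lo + hi) / 2;
            if (r[mid].end > addr) {
                best = &r[mid];         /* candidate; look for a lower one */
                if (r[mid].start <= addr)
                    break;              /* addr lies inside this region */
                hi = mid - 1;
            } else {
                lo = mid + 1;
            }
        }
        return best;
    }

    int main(void)
    {
        struct region v[] = { { 0x1000, 0x2000 }, { 0x4000, 0x5000 } };
        const struct region *r = find_region(v, 2, 0x3000);
        printf("%#lx starts above addr: containment must be checked\n",
               r->start);
        return 0;
    }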
-
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
-struct vm_area_struct *
-find_vma_prev(struct mm_struct *mm, unsigned long addr,
-                       struct vm_area_struct **pprev)
-{
-       struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node * rb_node;
-       if (!mm)
-               goto out;
-
-       /* Guard against addr being lower than the first VMA */
-       vma = mm->mmap;
-
-       /* Go through the RB tree quickly. */
-       rb_node = mm->mm_rb.rb_node;
-
-       while (rb_node) {
-               struct vm_area_struct *vma_tmp;
-               vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-               if (addr < vma_tmp->vm_end) {
-                       rb_node = rb_node->rb_left;
-               } else {
-                       prev = vma_tmp;
-                       if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-                               break;
-                       rb_node = rb_node->rb_right;
-               }
-       }
-
-out:
-       *pprev = prev;
-       return prev ? prev->vm_next : vma;
-}
-
-#ifdef CONFIG_STACK_GROWSUP
-/*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
- */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
-{
-       unsigned long grow;
-
-       if (!(vma->vm_flags & VM_GROWSUP))
-               return -EFAULT;
-
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
-       anon_vma_lock(vma);
-
-       /*
-        * vma->vm_start/vm_end cannot change under us because the caller
-        * is required to hold the mmap_sem in read mode.  We need the
-        * anon_vma lock to serialize against concurrent expand_stacks.
-        */
-       address += 4 + PAGE_SIZE - 1;
-       address &= PAGE_MASK;
-       grow = (address - vma->vm_end) >> PAGE_SHIFT;
-
-       /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
-               anon_vma_unlock(vma);
-               return -ENOMEM;
-       }
-       
-       if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
-                       ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
-               anon_vma_unlock(vma);
-               vm_unacct_memory(grow);
-               return -ENOMEM;
-       }
-       vma->vm_end = address;
-       vma->vm_mm->total_vm += grow;
-       if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
-       anon_vma_unlock(vma);
-       return 0;
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *vma, *prev;
-
-       addr &= PAGE_MASK;
-       vma = find_vma_prev(mm, addr, &prev);
-       if (vma && (vma->vm_start <= addr))
-               return vma;
-       if (!prev || expand_stack(prev, addr))
-               return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               make_pages_present(addr, prev->vm_end);
-       }
-       return prev;
-}
-#else
-/*
- * vma is the first one with address < vma->vm_start.  Have to extend vma.
- */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-       unsigned long grow;
-
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
-       anon_vma_lock(vma);
-
-       /*
-        * vma->vm_start/vm_end cannot change under us because the caller
-        * is required to hold the mmap_sem in read mode.  We need the
-        * anon_vma lock to serialize against concurrent expand_stacks.
-        */
-       address &= PAGE_MASK;
-       grow = (vma->vm_start - address) >> PAGE_SHIFT;
-
-       /* Overcommit.. */
-       if (security_vm_enough_memory(grow)) {
-               anon_vma_unlock(vma);
-               return -ENOMEM;
-       }
-       
-       if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
-                       ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
-               anon_vma_unlock(vma);
-               vm_unacct_memory(grow);
-               return -ENOMEM;
-       }
-       vma->vm_start = address;
-       vma->vm_pgoff -= grow;
-       vma->vm_mm->total_vm += grow;
-       if (vma->vm_flags & VM_LOCKED)
-               vma->vm_mm->locked_vm += grow;
-       anon_vma_unlock(vma);
-       return 0;
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct * mm, unsigned long addr)
-{
-       struct vm_area_struct * vma;
-       unsigned long start;
-
-       addr &= PAGE_MASK;
-       vma = find_vma(mm,addr);
-       if (!vma)
-               return NULL;
-       if (vma->vm_start <= addr)
-               return vma;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               return NULL;
-       start = vma->vm_start;
-       if (expand_stack(vma, addr))
-               return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
-               make_pages_present(addr, start);
-       }
-       return vma;
-}
-#endif
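
Both expand_stack variants enforce the same two ceilings before growing: the
new stack extent against RLIMIT_STACK and the enlarged address space against
RLIMIT_AS.  A userspace sketch of the first check, reading the real limit
with getrlimit (the 4 MiB extent is made up for illustration):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit stack;
        if (getrlimit(RLIMIT_STACK, &stack) != 0)
            return 1;

        unsigned long extent = 4UL << 20;    /* pretend stack spans 4 MiB */
        if (extent > stack.rlim_cur)         /* the expand_stack test */
            printf("growth would fail with ENOMEM\n");
        else
            printf("growth allowed (soft limit %lu bytes)\n",
                   (unsigned long)stack.rlim_cur);
        return 0;
    }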
-
-/*
- * Try to free as many page directory entries as we can,
- * without having to work very hard at actually scanning
- * the page tables themselves.
- *
- * Right now we try to free page tables if we have a nice
- * PGDIR-aligned area that got free'd up. We could be more
- * granular if we want to, but this is fast and simple,
- * and covers the bad cases.
- *
- * "prev", if it exists, points to a vma before the one
- * we just free'd - but there's no telling how much before.
- */
-static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-       unsigned long start, unsigned long end)
-{
-       unsigned long first = start & PGDIR_MASK;
-       unsigned long last = end + PGDIR_SIZE - 1;
-       unsigned long start_index, end_index;
-       struct mm_struct *mm = tlb->mm;
-
-       if (!prev) {
-               prev = mm->mmap;
-               if (!prev)
-                       goto no_mmaps;
-               if (prev->vm_end > start) {
-                       if (last > prev->vm_start)
-                               last = prev->vm_start;
-                       goto no_mmaps;
-               }
-       }
-       for (;;) {
-               struct vm_area_struct *next = prev->vm_next;
-
-               if (next) {
-                       if (next->vm_start < start) {
-                               prev = next;
-                               continue;
-                       }
-                       if (last > next->vm_start)
-                               last = next->vm_start;
-               }
-               if (prev->vm_end > first)
-                       first = prev->vm_end + PGDIR_SIZE - 1;
-               break;
-       }
-no_mmaps:
-       if (last < first)       /* for arches with discontiguous pgd indices */
-               return;
-       /*
-        * If the PGD bits are not consecutive in the virtual address, the
-        * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-        */
-       start_index = pgd_index(first);
-       if (start_index < FIRST_USER_PGD_NR)
-               start_index = FIRST_USER_PGD_NR;
-       end_index = pgd_index(last);
-       if (end_index > start_index) {
-               clear_page_tables(tlb, start_index, end_index - start_index);
-               flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
-       }
-}
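
The rounding at the top of free_pgtables widens the range to page-directory
granularity: first is rounded down with PGDIR_MASK, last is rounded up by
PGDIR_SIZE - 1, and only whole pgd slots in between get cleared.  A worked
example (PGDIR_SHIFT = 22, the two-level i386 layout, is an assumption; PAE
and other architectures differ):

    #include <stdio.h>

    #define PGDIR_SHIFT 22                /* assumed: 4 MiB per pgd slot */
    #define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
    #define PGDIR_MASK  (~(PGDIR_SIZE - 1))
    #define pgd_index(a) ((a) >> PGDIR_SHIFT)

    int main(void)
    {
        unsigned long start = 0x00500000, end = 0x01a00000;
        unsigned long first = start & PGDIR_MASK;       /* round down */
        unsigned long last  = end + PGDIR_SIZE - 1;     /* round up   */

        /* prints "pgd slots 1 .. 7" for this range */
        printf("pgd slots %lu .. %lu\n", pgd_index(first), pgd_index(last));
        return 0;
    }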
-
-/* Normal function to fix up a mapping.
- * This function is the default used when an area has no specific
- * handler.  It may be used as part of a more specific routine.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list.
- */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
-{
-       size_t len = area->vm_end - area->vm_start;
-
-       area->vm_mm->total_vm -= len >> PAGE_SHIFT;
-       if (area->vm_flags & VM_LOCKED)
-               area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
-       /*
-        * Is this a new hole at the lowest possible address?
-        */
-       if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                               area->vm_start < area->vm_mm->free_area_cache)
-             area->vm_mm->free_area_cache = area->vm_start;
-
-       remove_vm_struct(area);
-}
-
-/*
- * Update the VMA and inode share lists.
- *
- * Ok - we have the memory areas we should free on the 'free' list,
- * so release them, and do the vma updates.
- */
-static void unmap_vma_list(struct mm_struct *mm,
-       struct vm_area_struct *mpnt)
-{
-       do {
-               struct vm_area_struct *next = mpnt->vm_next;
-               unmap_vma(mm, mpnt);
-               mpnt = next;
-       } while (mpnt != NULL);
-       validate_mm(mm);
-}
-
-/*
- * Get rid of page table information in the indicated region.
- *
- * Called with the page table lock held.
- */
-static void unmap_region(struct mm_struct *mm,
-       struct vm_area_struct *vma,
-       struct vm_area_struct *prev,
-       unsigned long start,
-       unsigned long end)
-{
-       struct mmu_gather *tlb;
-       unsigned long nr_accounted = 0;
-
-       lru_add_drain();
-       tlb = tlb_gather_mmu(mm, 0);
-       unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
-
-       if (is_hugepage_only_range(start, end - start))
-               hugetlb_free_pgtables(tlb, prev, start, end);
-       else
-               free_pgtables(tlb, prev, start, end);
-       tlb_finish_mmu(tlb, start, end);
-}
-
-/*
- * Create a list of vma's touched by the unmap, removing them from the mm's
- * vma list as we go..
- */
-static void
-detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
-       struct vm_area_struct *prev, unsigned long end)
-{
-       struct vm_area_struct **insertion_point;
-       struct vm_area_struct *tail_vma = NULL;
-
-       insertion_point = (prev ? &prev->vm_next : &mm->mmap);
-       do {
-               rb_erase(&vma->vm_rb, &mm->mm_rb);
-               mm->map_count--;
-               tail_vma = vma;
-               vma = vma->vm_next;
-       } while (vma && vma->vm_start < end);
-       *insertion_point = vma;
-       tail_vma->vm_next = NULL;
-       mm->mmap_cache = NULL;          /* Kill the cache. */
-}
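
The unlink above is the classic pointer-to-pointer splice: insertion_point
addresses the link that led into the doomed run (either prev->vm_next or the
list head), so a single store reconnects the survivors.  The same idiom on a
plain singly linked list (illustrative types):

    #include <stdio.h>

    struct node { int val; struct node *next; };

    /* Detach the run of nodes with val < end that *insertion_point leads
     * to; the run is returned NULL-terminated, like the tail_vma case. */
    static struct node *detach_run(struct node **insertion_point, int end)
    {
        struct node *vma = *insertion_point, *tail = NULL;

        while (vma && vma->val < end) {   /* walk the run to be removed */
            tail = vma;
            vma = vma->next;
        }
        if (!tail)
            return NULL;                  /* nothing fell in the range */

        struct node *run = *insertion_point;
        *insertion_point = vma;           /* splice survivors together */
        tail->next = NULL;                /* terminate the detached run */
        return run;
    }

    int main(void)
    {
        struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
        struct node *head = &a;

        for (struct node *n = detach_run(&head, 25); n; n = n->next)
            printf("detached %d\n", n->val);   /* 10, then 20 */
        printf("head now %d\n", head->val);    /* 30 */
        return 0;
    }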
-
-/*
- * Split a vma into two pieces at address 'addr'; a new vma is allocated
- * either for the first part or for the tail.
- */
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
-             unsigned long addr, int new_below)
-{
-       struct mempolicy *pol;
-       struct vm_area_struct *new;
-
-       if (mm->map_count >= sysctl_max_map_count)
-               return -ENOMEM;
-
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!new)
-               return -ENOMEM;
-
-       /* most fields are the same, copy all, and then fixup */
-       *new = *vma;
-       vma_prio_tree_init(new);
-
-       if (new_below)
-               new->vm_end = addr;
-       else {
-               new->vm_start = addr;
-               new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
-       }
-
-       pol = mpol_copy(vma_policy(vma));
-       if (IS_ERR(pol)) {
-               kmem_cache_free(vm_area_cachep, new);
-               return PTR_ERR(pol);
-       }
-       vma_set_policy(new, pol);
-
-       if (new->vm_file)
-               get_file(new->vm_file);
-
-       if (new->vm_ops && new->vm_ops->open)
-               new->vm_ops->open(new);
-
-       if (new_below)
-               vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-                       ((addr - new->vm_start) >> PAGE_SHIFT), new);
-       else
-               vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
-       return 0;
-}
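
The only non-obvious fixup in split_vma is the file offset: the tail piece
begins (addr - vm_start) bytes into the mapping, so its vm_pgoff advances by
that many pages.  A worked example with made-up values:

    #include <assert.h>

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */

    int main(void)
    {
        unsigned long vm_start = 0x10000;     /* mapping starts here */
        unsigned long vm_pgoff = 8;           /* ...at file page 8   */
        unsigned long addr     = 0x13000;     /* split 3 pages in    */

        /* the tail vma starts at addr and maps file page 8 + 3 = 11 */
        unsigned long new_pgoff =
            vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT);
        assert(new_pgoff == 11);
        return 0;
    }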
-
-/* Munmap is split into 2 main parts -- this part which finds
- * what needs doing, and the areas themselves, which do the
- * work.  This now handles partial unmappings.
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
-{
-       unsigned long end;
-       struct vm_area_struct *mpnt, *prev, *last;
-
-       if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
-               return -EINVAL;
-
-       if ((len = PAGE_ALIGN(len)) == 0)
-               return -EINVAL;
-
-       /* Find the first overlapping VMA */
-       mpnt = find_vma_prev(mm, start, &prev);
-       if (!mpnt)
-               return 0;
-       /* we have  start < mpnt->vm_end  */
-
-       if (is_vm_hugetlb_page(mpnt)) {
-               int ret = is_aligned_hugepage_range(start, len);
-
-               if (ret)
-                       return ret;
-       }
-
-       /* if it doesn't overlap, we have nothing.. */
-       end = start + len;
-       if (mpnt->vm_start >= end)
-               return 0;
-
-       /* Something will probably happen, so notify. */
-       if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
-               profile_exec_unmap(mm);
-       /*
-        * If we need to split any vma, do it now to save pain later.
-        *
-        * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
-        * unmapped vm_area_struct will remain in use: so lower split_vma
-        * places tmp vma above, and higher split_vma places tmp vma below.
-        */
-       if (start > mpnt->vm_start) {
-               if (split_vma(mm, mpnt, start, 0))
-                       return -ENOMEM;
-               prev = mpnt;
-       }
-
-       /* Does it split the last one? */
-       last = find_vma(mm, end);
-       if (last && end > last->vm_start) {
-               if (split_vma(mm, last, end, 1))
-                       return -ENOMEM;
-       }
-       mpnt = prev? prev->vm_next: mm->mmap;
-
-       /*
-        * Remove the vma's, and unmap the actual pages
-        */
-       detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
-       spin_lock(&mm->page_table_lock);
-       unmap_region(mm, mpnt, prev, start, end);
-       spin_unlock(&mm->page_table_lock);
-
-       /* Fix up all other VM information */
-       unmap_vma_list(mm, mpnt);
-
-       return 0;
-}
-
-EXPORT_SYMBOL(do_munmap);
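
Because do_munmap splits vmas as needed, userspace can punch a hole in the
middle of a mapping, leaving two vmas where there was one.  A runnable
demonstration (error handling kept minimal):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* unmap only the middle page: the kernel splits the vma */
        if (munmap(p + page, page) != 0)
            return 1;

        p[0] = 'a';                 /* first page still mapped */
        p[2 * page] = 'b';          /* last page still mapped  */
        printf("hole punched at %p\n", (void *)(p + page));
        return 0;
    }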
-
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
-{
-       int ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_munmap(mm, addr, len);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-
-/*
- *  This is really a simplified "do_mmap".  It only handles
- *  anonymous maps.  Eventually we may be able to do some
- *  brk-specific accounting here.
- */
-unsigned long do_brk(unsigned long addr, unsigned long len)
-{
-       struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
-       unsigned long flags;
-       struct rb_node ** rb_link, * rb_parent;
-       pgoff_t pgoff = addr >> PAGE_SHIFT;
-
-       len = PAGE_ALIGN(len);
-       if (!len)
-               return addr;
-
-       if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-               return -EINVAL;
-
-       /*
-        * mlock MCL_FUTURE?
-        */
-       if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked = mm->locked_vm << PAGE_SHIFT;
-               locked += len;
-               if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
-                       return -EAGAIN;
-       }
-
-       /*
-        * Clear old maps.  This also does some error checking for us.
-        */
- munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
-               if (do_munmap(mm, addr, len))
-                       return -ENOMEM;
-               goto munmap_back;
-       }
-
-       /* Check against address space limits *after* clearing old maps... */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->rlim[RLIMIT_AS].rlim_cur)
-               return -ENOMEM;
-
-       if (mm->map_count > sysctl_max_map_count)
-               return -ENOMEM;
-
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
-               return -ENOMEM;
-
-       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
-       /* Can we just expand an old private anonymous mapping? */
-       if (vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL))
-               goto out;
-
-       /*
-        * create a vma struct for an anonymous mapping
-        */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (!vma) {
-               vm_unacct_memory(len >> PAGE_SHIFT);
-               return -ENOMEM;
-       }
-       memset(vma, 0, sizeof(*vma));
-
-       vma->vm_mm = mm;
-       vma->vm_start = addr;
-       vma->vm_end = addr + len;
-       vma->vm_pgoff = pgoff;
-       vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags & 0x0f];
-       vma_link(mm, vma, prev, rb_link, rb_parent);
-out:
-       mm->total_vm += len >> PAGE_SHIFT;
-       if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       return addr;
-}
-
-EXPORT_SYMBOL(do_brk);
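
do_brk is what backs the brk system call: an anonymous, accounted,
contiguous extension of the heap.  The classic userspace view is sbrk
(obsolescent, but it shows the effect directly):

    #define _DEFAULT_SOURCE             /* for sbrk on glibc */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        void *before = sbrk(0);         /* current program break  */
        if (sbrk(4096) == (void *)-1)   /* grow the heap one page */
            return 1;
        void *after = sbrk(0);

        printf("break moved %ld bytes\n",
               (long)((char *)after - (char *)before));
        return 0;
    }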
-
-/* Release all mmaps. */
-void exit_mmap(struct mm_struct *mm)
-{
-       struct mmu_gather *tlb;
-       struct vm_area_struct *vma;
-       unsigned long nr_accounted = 0;
-
-       profile_exit_mmap(mm);
-       lru_add_drain();
-
-       spin_lock(&mm->page_table_lock);
-
-       tlb = tlb_gather_mmu(mm, 1);
-       flush_cache_mm(mm);
-       /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
-       mm->map_count -= unmap_vmas(&tlb, mm, mm->mmap, 0,
-                                       ~0UL, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
-       BUG_ON(mm->map_count);  /* This is just debugging */
-       clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
-       tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
-
-       vma = mm->mmap;
-       mm->mmap = mm->mmap_cache = NULL;
-       mm->mm_rb = RB_ROOT;
-       mm->rss = 0;
-       mm->total_vm = 0;
-       mm->locked_vm = 0;
-
-       spin_unlock(&mm->page_table_lock);
-
-       /*
-        * Walk the list again, actually closing and freeing it
-        * without holding any MM locks.
-        */
-       while (vma) {
-               struct vm_area_struct *next = vma->vm_next;
-               remove_vm_struct(vma);
-               vma = next;
-       }
-}
-
-/* Insert vm structure into process list sorted by address
- * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_lock is taken here.
- */
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
-{
-       struct vm_area_struct * __vma, * prev;
-       struct rb_node ** rb_link, * rb_parent;
-
-       /*
-        * The vm_pgoff of a purely anonymous vma should be irrelevant
-        * until its first write fault, when the page's anon_vma and index
-        * are set.  But now set the vm_pgoff it will almost certainly
-        * end up with (unless mremap moves it elsewhere before that
-        * first write fault), so /proc/pid/maps tells a consistent story.
-        *
-        * By setting it to reflect the virtual start address of the
-        * vma, merges and splits can happen in a seamless way, just
-        * using the existing file pgoff checks and manipulations.
-        * Similarly in do_mmap_pgoff and in do_brk.
-        */
-       if (!vma->vm_file) {
-               BUG_ON(vma->anon_vma);
-               vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
-       }
-       __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
-               BUG();
-       vma_link(mm, vma, prev, rb_link, rb_parent);
-}
-
-/*
- * Copy the vma structure to a new location in the same mm,
- * prior to moving page table entries, to effect an mremap move.
- */
-struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
-       unsigned long addr, unsigned long len, pgoff_t pgoff)
-{
-       struct vm_area_struct *vma = *vmap;
-       unsigned long vma_start = vma->vm_start;
-       struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *new_vma, *prev;
-       struct rb_node **rb_link, *rb_parent;
-       struct mempolicy *pol;
-
-       /*
-        * If anonymous vma has not yet been faulted, update new pgoff
-        * to match new location, to increase its chance of merging.
-        */
-       if (!vma->vm_file && !vma->anon_vma)
-               pgoff = addr >> PAGE_SHIFT;
-
-       find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
-       if (new_vma) {
-               /*
-                * Source vma may have been merged into new_vma
-                */
-               if (vma_start >= new_vma->vm_start &&
-                   vma_start < new_vma->vm_end)
-                       *vmap = new_vma;
-       } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-               if (new_vma) {
-                       *new_vma = *vma;
-                       vma_prio_tree_init(new_vma);
-                       pol = mpol_copy(vma_policy(vma));
-                       if (IS_ERR(pol)) {
-                               kmem_cache_free(vm_area_cachep, new_vma);
-                               return NULL;
-                       }
-                       vma_set_policy(new_vma, pol);
-                       new_vma->vm_start = addr;
-                       new_vma->vm_end = addr + len;
-                       new_vma->vm_pgoff = pgoff;
-                       if (new_vma->vm_file)
-                               get_file(new_vma->vm_file);
-                       if (new_vma->vm_ops && new_vma->vm_ops->open)
-                               new_vma->vm_ops->open(new_vma);
-                       vma_link(mm, new_vma, prev, rb_link, rb_parent);
-               }
-       }
-       return new_vma;
-}
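
copy_vma is the helper mremap relies on when relocating a mapping: it either
merges the moved range into a neighbour at the destination or clones the vma
there before the page tables are moved.  The userspace view, using the
Linux-specific mremap with MREMAP_MAYMOVE so the kernel is free to relocate:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        p[0] = 'x';

        /* grow to 4 pages; the kernel may move the mapping */
        char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
            return 1;

        printf("data survived the move: %c (old %p, new %p)\n",
               q[0], (void *)p, (void *)q);
        return 0;
    }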